repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---
cbertinato/pandas | pandas/core/arrays/numpy_.py | 1 | 15297 |
import numbers
import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin
from pandas._libs import lib
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
from pandas.core.dtypes.inference import is_array_like, is_list_like
from pandas import compat
from pandas.core import nanops
from pandas.core.algorithms import searchsorted
from pandas.core.missing import backfill_1d, pad_1d
from .base import ExtensionArray, ExtensionOpsMixin
class PandasDtype(ExtensionDtype):
"""
A Pandas ExtensionDtype for NumPy dtypes.
.. versionadded:: 0.24.0
This is mostly for internal compatibility, and is not especially
useful on its own.
Parameters
----------
dtype : numpy.dtype
"""
_metadata = ('_dtype',)
def __init__(self, dtype):
dtype = np.dtype(dtype)
self._dtype = dtype
self._name = dtype.name
self._type = dtype.type
def __repr__(self):
return "PandasDtype({!r})".format(self.name)
@property
def numpy_dtype(self):
"""The NumPy dtype this PandasDtype wraps."""
return self._dtype
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def _is_numeric(self):
# exclude object, str, unicode, void.
return self.kind in set('biufc')
@property
def _is_boolean(self):
return self.kind == 'b'
@classmethod
def construct_from_string(cls, string):
return cls(np.dtype(string))
@classmethod
def construct_array_type(cls):
return PandasArray
@property
def kind(self):
return self._dtype.kind
@property
def itemsize(self):
"""The element size of this data-type object."""
return self._dtype.itemsize
class PandasArray(ExtensionArray, ExtensionOpsMixin, NDArrayOperatorsMixin):
"""
A pandas ExtensionArray for NumPy data.
.. versionadded:: 0.24.0
This is mostly for internal compatibility, and is not especially
useful on its own.
Parameters
----------
values : ndarray
The NumPy ndarray to wrap. Must be 1-dimensional.
copy : bool, default False
Whether to copy `values`.
Attributes
----------
None
Methods
-------
None
"""
# If you're wondering why pd.Series(cls) doesn't put the array in an
# ExtensionBlock, search for `ABCPandasArray`. We check for
# that _typ to ensure that users don't unnecessarily use EAs inside
# pandas internals, which turns off things like block consolidation.
_typ = "npy_extension"
__array_priority__ = 1000
# ------------------------------------------------------------------------
# Constructors
def __init__(self, values, copy=False):
if isinstance(values, type(self)):
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError("'values' must be a NumPy array.")
if values.ndim != 1:
raise ValueError("PandasArray must be 1-dimensional.")
if copy:
values = values.copy()
self._ndarray = values
self._dtype = PandasDtype(values.dtype)
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
if isinstance(dtype, PandasDtype):
dtype = dtype._dtype
result = np.asarray(scalars, dtype=dtype)
if copy and result is scalars:
result = result.copy()
return cls(result)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
@classmethod
def _concat_same_type(cls, to_concat):
return cls(np.concatenate(to_concat))
# ------------------------------------------------------------------------
# Data
@property
def dtype(self):
return self._dtype
# ------------------------------------------------------------------------
# NumPy Array Interface
def __array__(self, dtype=None):
return np.asarray(self._ndarray, dtype=dtype)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
# Lightly modified version of
# https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/\
# numpy.lib.mixins.NDArrayOperatorsMixin.html
# The primary modification is not boxing scalar return values
# in PandasArray, since pandas' ExtensionArrays are 1-d.
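# For example (hypothetical values): np.add(PandasArray(np.array([1, 2])), 1)
# re-boxes the array result as a PandasArray of [2, 3], while a reduction such
# as np.add.reduce(PandasArray(np.array([1, 2]))) returns the plain scalar 3.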
out = kwargs.get('out', ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use PandasArray instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle PandasArray objects.
if not isinstance(x, self._HANDLED_TYPES + (PandasArray,)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x._ndarray if isinstance(x, PandasArray) else x
for x in inputs)
if out:
kwargs['out'] = tuple(
x._ndarray if isinstance(x, PandasArray) else x
for x in out)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple and len(result):
# multiple return values
if not lib.is_scalar(result[0]):
# re-box array-like results
return tuple(type(self)(x) for x in result)
else:
# but not scalar reductions
return result
elif method == 'at':
# no return value
return None
else:
# one return value
if not lib.is_scalar(result):
# re-box array-like results, but not scalar reductions
result = type(self)(result)
return result
# ------------------------------------------------------------------------
# Pandas ExtensionArray Interface
def __getitem__(self, item):
if isinstance(item, type(self)):
item = item._ndarray
result = self._ndarray[item]
if not lib.is_scalar(item):
result = type(self)(result)
return result
def __setitem__(self, key, value):
from pandas.core.internals.arrays import extract_array
value = extract_array(value, extract_numpy=True)
if not lib.is_scalar(key) and is_list_like(key):
key = np.asarray(key)
if not lib.is_scalar(value):
value = np.asarray(value)
values = self._ndarray
t = np.result_type(value, values)
if t != self._ndarray.dtype:
values = values.astype(t, casting='safe')
values[key] = value
self._dtype = PandasDtype(t)
self._ndarray = values
else:
self._ndarray[key] = value
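# Illustrative example (hypothetical values): assigning floats into an
# int64-backed array upcasts via np.result_type.
# >>> arr = PandasArray(np.array([1, 2, 3]))
# >>> arr[[0, 1]] = [1.5, 2.5]  # arr.dtype becomes PandasDtype('float64')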
def __len__(self):
return len(self._ndarray)
@property
def nbytes(self):
return self._ndarray.nbytes
def isna(self):
from pandas import isna
return isna(self._ndarray)
def fillna(self, value=None, method=None, limit=None):
# TODO(_values_for_fillna): remove this
value, method = validate_fillna_kwargs(value, method)
mask = self.isna()
if is_array_like(value):
if len(value) != len(self):
raise ValueError("Length of 'value' does not match. Got ({}) "
" expected {}".format(len(value), len(self)))
value = value[mask]
if mask.any():
if method is not None:
func = pad_1d if method == 'pad' else backfill_1d
new_values = func(self._ndarray, limit=limit,
mask=mask)
new_values = self._from_sequence(new_values, dtype=self.dtype)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
def take(self, indices, allow_fill=False, fill_value=None):
from pandas.core.algorithms import take
result = take(self._ndarray, indices, allow_fill=allow_fill,
fill_value=fill_value)
return type(self)(result)
def copy(self, deep=False):
return type(self)(self._ndarray.copy())
def _values_for_argsort(self):
return self._ndarray
def _values_for_factorize(self):
return self._ndarray, -1
def unique(self):
from pandas import unique
return type(self)(unique(self._ndarray))
# ------------------------------------------------------------------------
# Reductions
def _reduce(self, name, skipna=True, **kwargs):
meth = getattr(self, name, None)
if meth:
return meth(skipna=skipna, **kwargs)
else:
msg = (
"'{}' does not implement reduction '{}'"
)
raise TypeError(msg.format(type(self).__name__, name))
def any(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_any((), dict(out=out, keepdims=keepdims))
return nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
def all(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_all((), dict(out=out, keepdims=keepdims))
return nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
def min(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_min((), dict(out=out, keepdims=keepdims))
return nanops.nanmin(self._ndarray, axis=axis, skipna=skipna)
def max(self, axis=None, out=None, keepdims=False, skipna=True):
nv.validate_max((), dict(out=out, keepdims=keepdims))
return nanops.nanmax(self._ndarray, axis=axis, skipna=skipna)
def sum(self, axis=None, dtype=None, out=None, keepdims=False,
initial=None, skipna=True, min_count=0):
nv.validate_sum((), dict(dtype=dtype, out=out, keepdims=keepdims,
initial=initial))
return nanops.nansum(self._ndarray, axis=axis, skipna=skipna,
min_count=min_count)
def prod(self, axis=None, dtype=None, out=None, keepdims=False,
initial=None, skipna=True, min_count=0):
nv.validate_prod((), dict(dtype=dtype, out=out, keepdims=keepdims,
initial=initial))
return nanops.nanprod(self._ndarray, axis=axis, skipna=skipna,
min_count=min_count)
def mean(self, axis=None, dtype=None, out=None, keepdims=False,
skipna=True):
nv.validate_mean((), dict(dtype=dtype, out=out, keepdims=keepdims))
return nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
def median(self, axis=None, out=None, overwrite_input=False,
keepdims=False, skipna=True):
nv.validate_median((), dict(out=out, overwrite_input=overwrite_input,
keepdims=keepdims))
return nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
def std(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False,
skipna=True):
nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,
keepdims=keepdims),
fname='std')
return nanops.nanstd(self._ndarray, axis=axis, skipna=skipna,
ddof=ddof)
def var(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False,
skipna=True):
nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,
keepdims=keepdims),
fname='var')
return nanops.nanvar(self._ndarray, axis=axis, skipna=skipna,
ddof=ddof)
def sem(self, axis=None, dtype=None, out=None, ddof=1, keepdims=False,
skipna=True):
nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,
keepdims=keepdims),
fname='sem')
return nanops.nansem(self._ndarray, axis=axis, skipna=skipna,
ddof=ddof)
def kurt(self, axis=None, dtype=None, out=None, keepdims=False,
skipna=True):
nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,
keepdims=keepdims),
fname='kurt')
return nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
def skew(self, axis=None, dtype=None, out=None, keepdims=False,
skipna=True):
nv.validate_stat_ddof_func((), dict(dtype=dtype, out=out,
keepdims=keepdims),
fname='skew')
return nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
# ------------------------------------------------------------------------
# Additional Methods
def to_numpy(self, dtype=None, copy=False):
"""
Convert the PandasArray to a :class:`numpy.ndarray`.
By default, this requires no coercion or copying of data.
Parameters
----------
dtype : numpy.dtype
The NumPy dtype to pass to :func:`numpy.asarray`.
copy : bool, default False
Whether to copy the underlying data.
Returns
-------
ndarray
"""
result = np.asarray(self._ndarray, dtype=dtype)
if copy and result is self._ndarray:
result = result.copy()
return result
@Appender(ExtensionArray.searchsorted.__doc__)
def searchsorted(self, value, side='left', sorter=None):
return searchsorted(self.to_numpy(), value,
side=side, sorter=sorter)
# ------------------------------------------------------------------------
# Ops
def __invert__(self):
return type(self)(~self._ndarray)
@classmethod
def _create_arithmetic_method(cls, op):
def arithmetic_method(self, other):
if isinstance(other, (ABCIndexClass, ABCSeries)):
return NotImplemented
elif isinstance(other, cls):
other = other._ndarray
with np.errstate(all="ignore"):
result = op(self._ndarray, other)
if op is divmod:
a, b = result
return cls(a), cls(b)
return cls(result)
return compat.set_function_name(arithmetic_method,
"__{}__".format(op.__name__),
cls)
_create_comparison_method = _create_arithmetic_method
PandasArray._add_arithmetic_ops()
PandasArray._add_comparison_ops()
| bsd-3-clause |
elkingtonmcb/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 |
"""
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
rebaltina/DAT210x | Module5/assignment7.py | 1 | 6160 |
import random, math
import pandas as pd
import numpy as np
import scipy.io
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn import manifold
# If you'd like to try this lab with PCA instead of Isomap,
# as the dimensionality reduction technique:
Test_PCA = True
def plotDecisionBoundary(model, X, y):
print "Plotting..."
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot') # Look Pretty
fig = plt.figure()
ax = fig.add_subplot(111)
padding = 0.1
resolution = 0.1
#(2 for benign, 4 for malignant)
colors = {2:'royalblue',4:'lightsalmon'}
# Calculate the boundaries
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
x_range = x_max - x_min
y_range = y_max - y_min
x_min -= x_range * padding
y_min -= y_range * padding
x_max += x_range * padding
y_max += y_range * padding
# Create a 2D Grid Matrix. The values stored in the matrix
# are the predictions of the class at said location
import numpy as np
xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
np.arange(y_min, y_max, resolution))
# What class does the classifier say?
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour map
plt.contourf(xx, yy, Z, cmap=plt.cm.seismic)
plt.axis('tight')
# Plot your testing points as well...
for label in np.unique(y):
indices = np.where(y == label)
plt.scatter(X[indices, 0], X[indices, 1], c=colors[label], alpha=0.8)
p = model.get_params()
plt.title('K = ' + str(p['n_neighbors']))
plt.show()
#
# TODO: Load in the dataset, identify nans, and set proper headers.
# Be sure to verify the rows line up by looking at the file in a text editor.
#
# .. your code here ..
df=pd.read_csv('c:/Users/User/workspace/DAT210x/Module5/Datasets/breast-cancer-wisconsin.data', names=['sample', 'thickness', 'size', 'shape', 'adhesion', 'epithelial', 'nuclei', 'chromatin', 'nucleoli', 'mitoses', 'status'])
#
# TODO: Copy out the status column into a slice, then drop it from the main
# dataframe. You can also drop the sample column, since that doesn't provide
# us with any machine learning power.
#
# .. your code here ..
status=df['status']
df=df.drop('status',1)
df=df.drop('sample',1)
#
# TODO: With the labels safely extracted from the dataset, replace any nan values
# with the mean feature / column value
#
# .. your code here ..
df.nuclei.unique()
df.nuclei = df.nuclei.replace('?',np.NaN)
df.nuclei = pd.to_numeric(df.nuclei)
df.nuclei = df.nuclei.fillna(df.nuclei.mean())
#
# TODO: Do train_test_split. Use the same variable names as on the EdX platform in
# the reading material, but set the random_state=7 for reproducibility, and keep
# the test_size at 0.5 (50%).
#
# .. your code here ..
data_train, data_test, label_train, label_test = train_test_split(df, status, test_size=0.5, random_state=7)
#
# TODO: Experiment with the basic SKLearn preprocessing scalers. We know that
# the features consist of different units mixed in together, so it might be
# reasonable to assume feature scaling is necessary. Print out a description
# of the dataset, post transformation.
#
# .. your code here ..
#scaling = preprocessing.StandardScaler()
#scaling = preprocessing.MinMaxScaler()
scaling = preprocessing.MaxAbsScaler()
#scaling = preprocessing.RobustScaler()
#scaling = preprocessing.Normalizer()
scaling.fit(data_train)
data_test = scaling.transform(data_test)
data_train = scaling.transform(data_train)
#
# PCA and Isomap are your new best friends
model = None
if Test_PCA:
print "Computing 2D Principle Components"
#
# TODO: Implement PCA here. save your model into the variable 'model'.
# You should reduce down to two dimensions.
#
# .. your code here ..
model = PCA(n_components=2)
model.fit(data_train)
T_pca_train = model.transform(data_train)
T_pca_test = model.transform(data_test)
else:
print "Computing 2D Isomap Manifold"
#
# TODO: Implement Isomap here. save your model into the variable 'model'
# Experiment with K values from 5-10.
# You should reduce down to two dimensions.
#
# .. your code here ..
model = manifold.Isomap(n_neighbors=5, n_components=2)
model.fit(data_train)
data_train = model.transform(data_train)
data_test = model.transform(data_test)
#
# TODO: Train your model against data_train, then transform both
# data_train and data_test using your model. You can save the results right
# back into the variables themselves.
#
# .. your code here ..
model.fit(data_train)
data_train = model.transform(data_train)
data_test = model.transform(data_test)
#
# TODO: Implement and train KNeighborsClassifier on your projected 2D
# training data here. You can use any K value from 1 - 15, so play around
# with it and see what results you can come up with. Your goal is to find a
# good balance where you aren't too specific (low-K), nor are you too
# general (high-K). You should also experiment with how changing the weights
# parameter affects the results.
#
# .. your code here ..
knmodel = KNeighborsClassifier(n_neighbors=3, weights='distance')
knmodel.fit(data_train, label_train)
#
# INFO: Be sure to always keep the domain of the problem in mind! It's
# WAY more important to errantly classify a benign tumor as malignant,
# and have it removed, than to incorrectly leave a malignant tumor, believing
# it to be benign, and then having the patient progress in cancer. Since the UDF
# weights don't give you any class information, the only way to introduce this
# data into SKLearn's KNN Classifier is by "baking" it into your data. For
# example, randomly reducing the ratio of benign samples compared to malignant
# samples from the training set.
#
# TODO: Calculate + Print the accuracy of the testing set
#
# .. your code here ..
knmodel.score(data_test, label_test)
plotDecisionBoundary(knmodel, data_test, label_test)
| mit |
mprelee/data-incubator-capstone | src/parse_stories.py | 1 | 5568 |
# parse_ages.py
# Try and find age info in each story
# Matt Prelee
# POSSIBLE TO DO:
# parse "baby boy" and "baby girl"
import pickle
import re
import time
from math import floor
import numpy as np
import pandas as pd
import nltk
from text2num import text2num
from config import SCRAPE_OUTPUT,STORY_DATA,AGE_GROUP_BINS,AGE_GROUP_LABELS
# List of strings to consider as "one" unit
AGE_STRINGS_AS_ONE = ['a','few','just']
# Gender specific pronouns
# http://en.wikipedia.org/wiki/Gender-specific_and_gender-neutral_pronouns
MALE_PRONOUNS = ['he','him','his','himself']
FEMALE_PRONOUNS = ['she','her','hers','herself']
# (?:...) is a noncapturing group, \u00BD is the unicode character for 1/2
age_regex = re.compile(ur'(\w+)[- ](?:and-a-half-|\u00BD-)?(years?|months?|weeks?|days?)[- ]old',re.UNICODE)
newborn_regex = re.compile(ur'newborn',re.UNICODE)
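# Illustrative matches for age_regex (hypothetical inputs): "three-year-old"
# yields groups ('three', 'year'), "14 months old" yields ('14', 'months'), and
# "two-and-a-half-years-old" yields ('two', 'years'); parse_ages below converts
# these to 3, 1 and 2 years respectively.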
def parse_ages(stories) :
''' Search for age information.
stories is a list of 'story' strings
returns a list of ages, same length as stories
Newborns are considered to have age zero
All non-year ages are quantized by taking a floor of the value i.e. 8 months is
zero, 14 months is age 1, etc.
'few','a' are considered to be 1 unit
'''
ages = []
count = 0
for story in stories :
age_match = age_regex.search(story)
newborn_match = newborn_regex.search(story)
if age_match is not None :
age_str = age_match.groups()[0]
# Parse unit as ascii and make lowercase
unit = age_match.groups()[1].encode('ascii','replace').lower()
try :
age_unitless = int(age_str)
except ValueError :
try :
age_unitless = text2num(age_str.encode('ascii','replace').lower())
except :
if age_str in AGE_STRINGS_AS_ONE:
age_unitless = 1
else :
# If problem parsing, assume it is a small number
print 'Error parsing \'%s\' into a number, converting to zero' % age_str
age_unitless = 0
print story
count+=1
if unit in ['year','years'] :
age = age_unitless
elif unit in ['month','months'] :
age = floor(float(age_unitless)/12.)
#print '%d months converted to %d years' % (age_unitless,age)
elif unit in ['week','weeks'] :
age = floor(float(age_unitless)/52.)
#print '%d weeks converted to %d years' % (age_unitless,age)
elif unit in ['day','days'] :
age = floor(float(age_unitless)/365.)
#print '%d days converted to %d years' % (age_unitless,age)
elif newborn_match is not None :
age = 0
count+=1
else :
age = None
#print story
ages.append(age)
print 'Identified %d ages out of %d patients' % (count,len(stories))
return ages
def parse_gender(stories) :
gender_data = []
start_time = time.time()
for story in stories :
tokens = nltk.word_tokenize(story) # Tokenize words
tags = nltk.pos_tag(tokens) # Tag as parts of speech (POS)
# PRON tag is what we are looking for
male_pro = []
female_pro = []
for (word,pos) in tags :
if pos == 'PRP' or pos == 'PRP$':
if word.lower() in MALE_PRONOUNS :
male_pro.append(word)
elif word.lower() in FEMALE_PRONOUNS :
female_pro.append(word)
if len(male_pro) > len(female_pro) :
gender = 'M'
elif len(male_pro) < len(female_pro) :
gender = 'F'
else :
gender = None
gender_data.append({'num_male_pro':len(male_pro),\
'num_female_pro':len(female_pro),\
'gender':gender})
end_time = time.time()
print 'Estimated %d genders out of %d patients in %d seconds' % \
(sum([g['gender'] is not None for g in gender_data]),len(stories),\
end_time-start_time)
return gender_data
def parse_stories() :
# Get parsed transparency document
scrape_db = pickle.load(open(SCRAPE_OUTPUT,'rb'))
# Grab stories
stories = scrape_db['story'].values
# Generate ages series
ages = pd.Series(data=parse_ages(stories),index=scrape_db.index,name='age')
# Generate gender data series
gender_data = parse_gender(stories)
genders = pd.Series(data=[g['gender'] for g in gender_data],\
index=scrape_db.index,name='gender')
num_male_pro = pd.Series(data=[g['num_male_pro'] for g in gender_data],\
index=scrape_db.index,name='num_male_pro')
num_female_pro = pd.Series(data=[g['num_female_pro'] for g in gender_data],\
index=scrape_db.index,name='num_female_pro')
# Add a binary gender column for regression
genders_binary_list = [{'M':-1,'F':1,None:0}[g['gender']] for g in gender_data]
genders_binary = pd.Series(data=genders_binary_list,index=scrape_db.index,name='gender_binary')
# Concatenate names
story_db = pd.concat([ages,genders,genders_binary,num_male_pro,num_female_pro],axis=1)
# Define some age groups
story_db['age_group'] = pd.cut(story_db['age'],AGE_GROUP_BINS,labels=AGE_GROUP_LABELS)
#print story_db
# Save to file
pickle.dump(story_db,open(STORY_DATA,'wb'))
if __name__ == '__main__' :
parse_stories()
| gpl-2.0 |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pybedtools-0.7.6-py2.7-linux-x86_64.egg/pybedtools/scripts/venn_mpl.py | 2 | 4952 |
#!/usr/bin/env python
"""
Given 3 files, creates a 3-way Venn diagram of intersections using matplotlib; \
see :mod:`pybedtools.contrib.venn_maker` for more flexibility.
Numbers are placed on the diagram. If you don't have matplotlib installed,
try venn_gchart.py to use the Google Chart API instead.
The values in the diagram assume:
* unstranded intersections
* no features that are nested inside larger features
"""
import argparse
import sys
import os
import pybedtools
def venn_mpl(a, b, c, colors=None, outfn='out.png', labels=None):
"""
*a*, *b*, and *c* are filenames to BED-like files.
*colors* is a list of matplotlib colors for the Venn diagram circles.
*outfn* is the resulting output file. This is passed directly to
fig.savefig(), so you can supply extensions of .png, .pdf, or whatever your
matplotlib installation supports.
*labels* is a list of labels to use for each of the files; by default the
labels are ['a','b','c']
"""
try:
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
except ImportError:
sys.stderr.write('matplotlib is required to make a Venn diagram with %s\n' % os.path.basename(sys.argv[0]))
sys.exit(1)
a = pybedtools.BedTool(a)
b = pybedtools.BedTool(b)
c = pybedtools.BedTool(c)
if colors is None:
colors = ['r','b','g']
radius = 6.0
center = 0.0
offset = radius / 2
if labels is None:
labels = ['a','b','c']
circle_a = Circle(xy = (center-offset, center+offset), radius=radius, edgecolor=colors[0], label=labels[0])
circle_b = Circle(xy = (center+offset, center+offset), radius=radius, edgecolor=colors[1], label=labels[1])
circle_c = Circle(xy = (center, center-offset), radius=radius, edgecolor=colors[2], label=labels[2])
fig = plt.figure(facecolor='w')
ax = fig.add_subplot(111)
for circle in (circle_a, circle_b, circle_c):
circle.set_facecolor('none')
circle.set_linewidth(3)
ax.add_patch(circle)
ax.axis('tight')
ax.axis('equal')
ax.set_axis_off()
kwargs = dict(horizontalalignment='center')
# Unique to A
ax.text( center-2*offset, center+offset, str((a - b - c).count()), **kwargs)
# Unique to B
ax.text( center+2*offset, center+offset, str((b - a - c).count()), **kwargs)
# Unique to C
ax.text( center, center-2*offset, str((c - a - b).count()), **kwargs)
# A and B not C
ax.text( center, center+2*offset-0.5*offset, str((a + b - c).count()), **kwargs)
# A and C not B
ax.text( center-1.2*offset, center-0.5*offset, str((a + c - b).count()), **kwargs)
# B and C not A
ax.text( center+1.2*offset, center-0.5*offset, str((b + c - a).count()), **kwargs)
# all
ax.text( center, center, str((a + b + c).count()), **kwargs)
ax.legend(loc='best')
fig.savefig(outfn)
plt.close(fig)
def main():
"""Create a 3-way Venn diagram, using matplotlib"""
op = argparse.ArgumentParser(description=__doc__, prog=sys.argv[0])
op.add_argument('-a', help='File to use for the left-most circle')
op.add_argument('-b', help='File to use for the right-most circle')
op.add_argument('-c', help='File to use for the bottom circle')
op.add_argument('--labels',
help='Optional comma-separated list of '
'labels for a, b, and c', default='a,b,c')
op.add_argument('--colors', default='r,b,g',
help='Comma-separated list of matplotlib-valid colors '
'for circles a, b, and c. E.g., --colors=r,b,k')
op.add_argument('-o', default='out.png',
help='Output file to save as. Extension is '
'meaningful, e.g., out.pdf, out.png, out.svg. Default is "%(default)s"')
op.add_argument('--test', action='store_true', help='run test, overriding all other options.')
options = op.parse_args()
reqd_args = ['a','b','c']
if not options.test:
for ra in reqd_args:
if not getattr(options,ra):
op.print_help()
sys.stderr.write('Missing required arg "%s"\n' % ra)
sys.exit(1)
if options.test:
pybedtools.bedtool.random.seed(1)
a = pybedtools.example_bedtool('rmsk.hg18.chr21.small.bed')
b = pybedtools.example_bedtool('venn.b.bed')
c = pybedtools.example_bedtool('venn.c.bed')
options.a = a.fn
options.b = b.fn
options.c = c.fn
options.colors='r,b,g'
options.o = 'out.png'
options.labels = 'a,b,c'
venn_mpl(a=options.a, b=options.b, c=options.c,
colors=options.colors.split(','),
labels=options.labels.split(','),
outfn=options.o)
if __name__ == "__main__":
import doctest
if doctest.testmod(optionflags=doctest.ELLIPSIS).failed == 0:
main()
| apache-2.0 |
caganze/wisps | wisps/simulations/euclid.py | 1 | 3455 |
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
import pandas as pd
import pymc3 as pm
import seaborn as sns
import numba
from scipy import integrate
from .binaries import make_systems
from wisps.utils.tools import get_distance
from tqdm import tqdm
import wisps
import wisps.simulations as wispsim
#constant distance
EUCLID_SOUTH=SkyCoord(l=24.6*u.deg, b=-82.0*u.deg , frame='galactic').galactic
EUCLID_NORTH=SkyCoord("18:0:0 66:33:0", obstime="J2000", unit=u.deg).galactic
EUCLID_FORNAX=SkyCoord("3:32:28.0 -27:48:30" , obstime="J2000", unit=u.deg).galactic
#mag limits
EUCLID_MAG_LIMITS={'J': 27., 'H': 27.}
#absol=#wisps.absolute_magnitude_jh(wispsim.SPGRID)[1]
#RELJ=wisps.POLYNOMIAL_RELATIONS['abs_mags']['EUCLID_J']
RELH=wisps.POLYNOMIAL_RELATIONS['abs_mags']['EUCLID_H']
absol=(RELH[0])(np.random.normal(wispsim.SPGRID, RELH[1]))
DMAXS=dict(zip(wispsim.SPGRID, (wisps.get_distance(absol, np.ones_like(absol)*EUCLID_MAG_LIMITS['H']))))
#constants
Rsun=wispsim.Rsun
Zsun=wispsim.Zsun
def distance_sampler(l, b, nsample=1000, h=300, dmax=1000):
"""
sample the galaxy given a scale height
l and b must be in radians
"""
def logp(l, b, r, z, d, h):
return np.log((d**2)*wispsim.density_function(r, z, h))
with pm.Model() as model:
d=pm.Uniform('d', lower=0., upper=dmax, testval=10.,)
x=pm.Deterministic('x', Rsun-d*np.cos(b)*np.cos(l))
y=pm.Deterministic('y', -d*np.cos(b)*np.sin(l))
r=pm.Deterministic('r', (x**2+y**2)**0.5 )
z=pm.Deterministic('z', Zsun+ d * np.sin(b))
like = pm.DensityDist('likelihood', logp, observed={'l':l, 'b':b,
'r': r, 'z': z, 'd':d, 'h':h})
trace = pm.sample(draws=int(nsample), cores=4, step=pm.Metropolis(), tune=int(nsample/20), discard_tuned_samples=True)
return trace
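# Illustrative call (hypothetical numbers, distances in the same units as dmax):
# trace = distance_sampler(0.5, -0.3, nsample=500, h=300, dmax=800)
# sampled_distances = trace['d']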
@np.vectorize
def euclid_selection_function(j, h):
#a simple step-function selection function based on mag cuts
s=0.
if j <EUCLID_MAG_LIMITS['J']:
s=1.
if h<EUCLID_MAG_LIMITS['H']:
s=1.
return s
def expected_numbers(model, field='fornax', h=300):
#compute expected numbers in the euclid fields based on a given model
#spectral type
syst=make_systems(model_name=model, bfraction=0.2)
sortedindx=np.argsort((syst['system_spts']).flatten())
spts=((syst['system_spts']).flatten())[sortedindx]
#
round_spts=np.round(spts).astype(float).flatten()
print (round_spts.shape)
#distances
dists=None
ds=np.zeros(len(spts))
coordinate_field=None
if field=='fornax':
coordinate_field=EUCLID_FORNAX
if field=='south':
coordinate_field=EUCLID_SOUTH
if field=='north':
coordinate_field=EUCLID_NORTH
for k in DMAXS.keys():
trace=distance_sampler(coordinate_field.l.radian, coordinate_field.b.radian, dmax=DMAXS[k], nsample=1000, h=h)
indx= (round_spts==k)
ds[indx]=np.random.choice(trace['d'].flatten(), len(round_spts[indx]))
absjs, abshs=wisps.absolute_magnitude_jh(spts)
dists=ds
appjs=absjs+5*np.log10(dists/10.0)
apphs=abshs+5*np.log10(dists/10.0)
#selection probabilities
s=euclid_selection_function(appjs, apphs)
#teffs are for normalizing the LF
return {'spt': spts, 'ds': dists, 'j':appjs, 'h':apphs, 'prob': s, 'teff': ((syst['system_teff']).flatten())[sortedindx]}
| mit |
rbaravalle/imfractal | imfractal/Algorithm/Local_MFS_Pyramid_3D.py | 1 | 8419 |
"""
Copyright (c) 2016 Rodrigo Baravalle
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from Algorithm import *
import numpy as np
from math import log10
import scipy.signal
import scipy.io as sio
from MFS_3D import *
class Local_MFS_Pyramid_3D (Algorithm):
"""
:3D implementation of Local MFS through holder exponents f(alpha)
:Several MFS are computed on a single domain, from which then
:a set of operations produces features (Pyramid version)
:version: 1.0
:author: Rodrigo Baravalle
"""
def __init__(self):
pass
def setDef(self, ind, f, ite, filename, file_mask, params):
# parameters: ind -> determines how many levels are used when computing the density
# choose 1 for using directly the image measurement im or
# >= 6 for computing the density of im (quite stable for >=5)
# f ----> determines the dimension of MFS vector
# ite ---> determines how many levels are used when computing MFS for each
self.ind_num = ind # number of pixels for averaging
self.f_num = f # window
self.ite_num = ite
self.filename = filename
self.file_mask = file_mask
self.params = params
def gauss_kern(self,size_x, size_y, size_z):
""" Returns a normalized 3D gauss kernel array for convolutions """
m = np.float32(size_x)
n = np.float32(size_y)
o = np.float32(size_z)
sigma = 2; # ???
if(size_x <= 3): sigma = 1.5;
if(size_x == 5): sigma = 2.5;
z, y, x = np.mgrid[-(m-1)/2:(m-1)/2+1, -(n-1)/2:(n-1)/2+1, -(o-1)/2:(o-1)/2+1]
b = 2*(sigma**2)
square = lambda i : i**2
fm = lambda i: map(square, i)
x2 = map(fm, x)
y2 = map(fm, y)
z2 = map(fm, z)
g = np.sum([x2, y2, z2], axis=0).astype(np.float32)
g = np.exp(g).astype(np.float32)
return g / g.sum()
def determine_threshold(self, arr):
# compute histogram of values
bins = range(np.min(arr), np.max(arr) + 1)
h = np.histogram(arr, bins=bins)
threshold = np.min(arr)
# get x% of mass -> threshold
assert (len(arr.shape) == 3)
total_pixels = arr.shape[0] * arr.shape[1] * arr.shape[2]
for i in range(len(bins) + 1):
# compute sum of h(x) from x = 0 to x = i
partial_sum_vector = np.cumsum(h[0][: (i + 1)])
partial_sum = partial_sum_vector[len(partial_sum_vector) - 1]
percentage = (float)(partial_sum) / (float)(total_pixels)
if percentage > 0.75:
threshold = np.min(arr) + i
break
return threshold
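# Worked example (hypothetical values): if np.min(arr) is 0 and the cumulative
# histogram first exceeds 75% of the voxels at bin index i = 12, the returned
# threshold is 0 + 12 = 12.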
def openMatlab(self, name, filename, greyscale):
import scipy.io as sio
arr = np.array(sio.loadmat(filename)[name]).astype(np.int32)
if greyscale:
return arr
if name == "S":
threshold = self.determine_threshold(arr)
arr = arr > threshold
a_v = arr.cumsum()
print "Amount of white pixels: ", a_v[len(a_v) - 1]
# debug - to see the spongious structure
# plt.imshow((arr[:,:,50]), cmap=plt.gray())
# plt.show()
return arr
def getFDs(self):
"""
@param string filename : volume location
@param string file_mask : mask volume location
@return [float] : 3D (local) multifractal dimensions
@author: Rodrigo Baravalle.
"""
# data is a 3D grayscale volume
data = self.openMatlab('S', self.filename, True)
data_mask = self.openMatlab('M', self.file_mask, True)
# use the base 3D MFS with the same parameters
# fix me - parameter handling
base_MFS = MFS_3D()
base_MFS.setDef(self.ind_num, self.f_num, self.ite_num,
self.filename, self.file_mask, self.params)
# Masking
data = data * (data_mask > 0)
# Other multifractal measures
if self.params['gradient'] == True:
data = base_MFS.gradient(data)
else:
if self.params['laplacian'] == True:
print "laplacian!"
data = base_MFS.laplacian(data)
import scipy.ndimage.interpolation
if False: # for the paper
import ntpath
filee = ntpath.basename(self.filename)
if filee == 'BA01_120_2Slices.mat':
data2 = data[130,:,:]
#import Image
#img = Image.fromarray(data[40])
#image = Image.open(fname).convert("L")
#arr = np.asarray(image)
import matplotlib.pyplot as plt
f = plt.figure()
plt.subplots_adjust(hspace=.001, wspace=.001)
for i in range(5):
f.add_subplot(1,5,i+1)
plt.axis('off')
f.tight_layout()
plt.imshow(data2, cmap='bone')
data2 = scipy.ndimage.interpolation.zoom(data2, 0.5, order=3)
plt.show()
#img.show()
exit()
else: return
result = []
data_orig = data
for i in range(5):
xs, ys, zs = data.shape
num_divisions = 1
xs_d = xs / num_divisions
ys_d = ys / num_divisions
zs_d = zs / num_divisions
dims = 6
local_mfs = np.zeros((num_divisions, num_divisions, num_divisions, 20))
min_diff = 10000.0
max_diff = -10000.0
for i in range(num_divisions):
for j in range(num_divisions):
for k in range(num_divisions):
print "NEXT LOCAL MFS...", i*num_divisions*num_divisions + j*num_divisions + k
mfs = base_MFS.getFDs(
data[i * xs_d : (i + 1)*xs_d,
j * ys_d : (j + 1)*ys_d,
k * zs_d : (k + 1)*zs_d])
print mfs
local_mfs[i, j, k] = mfs
d = np.max(mfs) - np.min(mfs)
if d < min_diff:
min_diff = d
if d > max_diff:
max_diff = d
max_fa = np.max(local_mfs)
min_fa = np.min(local_mfs)
std_fa = np.std(local_mfs)
mean_fa = np.mean(local_mfs)
#result = np.hstack(( np.array([max_fa, min_fa,
# mean_fa, std_fa,
# max_diff, min_diff]) ,
# result))
# Pure MFS (Global) Pyramid
result = np.hstack((local_mfs[0,0,0], result))
# downscale volume to its half
data = scipy.ndimage.interpolation.zoom(data, 0.5, order=3)
return result
| bsd-3-clause |
amraboelela/swift | utils/dev-scripts/scurve_printer.py | 37 | 2875 |
#!/usr/bin/env python
# This is a simple script that takes in a scurve file produced by
# csvcolumn_to_scurve and produces a png graph of the scurve.
import argparse
import csv
import matplotlib.pyplot as plt
import numpy as np
FIELDS = ['N/total', 'New/Old']
def get_data(input_file):
global FIELDS
for row in csv.DictReader(input_file):
yield (float(row[FIELDS[0]]), float(row[FIELDS[1]]))
def main():
p = argparse.ArgumentParser()
p.add_argument('input_csv_file', type=argparse.FileType('r'))
p.add_argument('output_file', type=str)
p.add_argument('-y-axis-num-tick-marks', type=int,
help='The number of y tick marks to use above/below zero.')
p.add_argument('-y-axis-min', type=float,
help='Override the min y axis that we use')
p.add_argument('-y-axis-max', type=float,
help='Override the max y axis that we use')
p.add_argument('-title', type=str,
help='Title of the graph')
p.add_argument('-x-axis-title', type=str,
help='The title to use on the x-axis of the graph')
p.add_argument('-y-axis-title', type=str,
help='The title to use on the y-axis of the graph')
args = p.parse_args()
data = np.array(list(get_data(args.input_csv_file)))
assert np.all(data >= 0)
x = data[:, 0]
y = data[:, 1]
x_axis_title = args.x_axis_title or FIELDS[0]
y_axis_title = args.y_axis_title or FIELDS[1]
title = args.title or "{} vs {}".format(x_axis_title, y_axis_title)
fig, ax = plt.subplots()
fig.set_size_inches(18.5, 18.5)
fig.suptitle(title, fontsize=20)
ax.set_xlabel(x_axis_title, fontsize=20)
ax.set_ylabel(y_axis_title, fontsize=20)
ax.plot(x, y)
ax.scatter(x, y)
# To get good bounds, we:
#
# 1. Re-center our data at 0 by subtracting 1. This will give us the %
# difference in between new and old (i.e. (new - old)/old)
#
# 2. Then we take the maximum absolute delta from zero and round to a
# multiple of 5 away from zero. Let's call this value limit.
#
# 3. We set [min_y, max_y] = [1.0 - limit, 1.0 + limit]
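# Worked example (hypothetical data): if the largest |y - 1.0| is 0.07, then
# max_magnitude = 7, limit = ((7 // 5) + 1) * 5 * 0.01 = 0.10, and the default
# y-axis bounds are [0.90, 1.10].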
recentered_data = y - 1.0
max_magnitude = int(np.max(np.abs(recentered_data)) * 100.0)
y_limit = float(((max_magnitude // 5) + 1) * 5) * 0.01
ax.set_xlim(0.0, 1.0)
y_min = args.y_axis_min or 1.0 - y_limit
y_max = args.y_axis_max or 1.0 + y_limit
assert(y_min <= y_max)
ax.set_ylim(y_min, y_max)
ax.grid(True)
ax.xaxis.set_ticks(np.arange(0.0, 1.0, 0.05))
if args.y_axis_num_tick_marks:
y_delta = y_max - y_min
y_tickmark_frequency = y_delta / float(args.y_axis_num_tick_marks)
ax.yaxis.set_ticks(np.arange(y_min, y_max, y_tickmark_frequency))
plt.savefig(args.output_file)
if __name__ == "__main__":
main()
| apache-2.0 |
jniediek/mne-python | examples/time_frequency/plot_temporal_whitening.py | 9 | 1849 |
"""
================================
Temporal whitening with AR model
================================
This script shows how to fit an AR model to data and use it
to temporally whiten the signals.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import fit_iir_model_raw
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif'
raw = mne.io.read_raw_fif(raw_fname)
proj = mne.read_proj(proj_fname)
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
# Set up pick list: Gradiometers - bad channels
picks = mne.pick_types(raw.info, meg='grad', exclude='bads')
order = 5 # define model order
picks = picks[:5]
# Estimate AR models on raw data
b, a = fit_iir_model_raw(raw, order=order, picks=picks, tmin=60, tmax=180)
d, times = raw[0, 1e4:2e4] # look at one channel from now on
d = d.ravel() # make flat vector
innovation = signal.convolve(d, a, 'valid')
d_ = signal.lfilter(b, a, innovation) # regenerate the signal
d_ = np.r_[d_[0] * np.ones(order), d_] # dummy samples to keep signal length
###############################################################################
# Plot the different time series and PSDs
plt.close('all')
plt.figure()
plt.plot(d[:100], label='signal')
plt.plot(d_[:100], label='regenerated signal')
plt.legend()
plt.figure()
plt.psd(d, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(innovation, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(d_, Fs=raw.info['sfreq'], NFFT=2048, linestyle='--')
plt.legend(('Signal', 'Innovation', 'Regenerated signal'))
plt.show()
| bsd-3-clause |
maroy/TSTA | p3/nfl_clusterer_15_60.py | 1 | 2304 |
import re
import sys
import json
import time
import datetime
import numpy
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from tweets import Tweets
tweets = None
def cluster(data, k, stop_words):
vectorizer = TfidfVectorizer(max_df=0.5, min_df=2, stop_words=stop_words)
td_matrix = vectorizer.fit_transform(data)
km = KMeans(n_clusters=k, init='k-means++', max_iter=200, n_jobs=-1)
km.fit(td_matrix)
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
def count(acc,value):
acc[value] += 1
return acc
cluster_counts = reduce(count, km.labels_, [0]*k)
result = []
for i in reversed(numpy.array(cluster_counts).argsort()):
x = [float(cluster_counts[i])/len(data)]
for ind in order_centroids[i, :10]:
x.append(terms[ind])
result.append(x)
return result
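# Illustrative return value (hypothetical terms): each row of the result is
# [fraction_of_tweets_in_cluster, term_1, ..., term_10], e.g.
# [0.42, 'touchdown', 'seahawks', ...], ordered from largest cluster to smallest.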
def dot():
sys.stdout.write('.')
sys.stdout.flush()
def main():
t0 = time.time()
folder = 'text/'
tweets = Tweets.load_from_folder(folder)
print "Tweets loaded {0}s".format(time.time() - t0)
duration = 60
results = []
runs = [
"nhl", "any", "nba", "nfl"
]
for run in runs:
t0 = time.time()
for day in range(7,28):
for hour in range(0,24):
for minute in [0,15,30,45]:
end = datetime.datetime(2014, 11, day, hour=hour, minute=minute)
start = end - datetime.timedelta(seconds=60 * duration)
data = tweets.get_collection(start, end, run if run != 'any' else None)
if len(data) == 0:
break
result_date = start.strftime('%Y-%m-%d %H:%M') + " - " + end.strftime('%Y-%m-%d %H:%M')
result_clusters = cluster(data,5, [])
results.append({"date": result_date, "clusters": result_clusters})
#dot()
print end, len(data)
print
with open("viz/" + run + "_15_60.json", "w") as f:
json.dump(results, f)
print run + ' done, ', time.time() - t0, 'seconds'
if __name__ == "__main__":
main()
| mit |
bradmontgomery/ml | book/ch09/utils.py | 24 | 5568 |
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import sys
from matplotlib import pylab
import numpy as np
DATA_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data")
CHART_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "charts")
for d in [DATA_DIR, CHART_DIR]:
if not os.path.exists(d):
os.mkdir(d)
# Put your directory to the different music genres here
GENRE_DIR = None
GENRE_LIST = ["classical", "jazz", "country", "pop", "rock", "metal"]
# Put your directory to the test dir here
TEST_DIR = None
if GENRE_DIR is None or TEST_DIR is None:
print("Please set GENRE_DIR and TEST_DIR in utils.py")
sys.exit(1)
def plot_confusion_matrix(cm, genre_list, name, title):
pylab.clf()
pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
ax = pylab.axes()
ax.set_xticks(range(len(genre_list)))
ax.set_xticklabels(genre_list)
ax.xaxis.set_ticks_position("bottom")
ax.set_yticks(range(len(genre_list)))
ax.set_yticklabels(genre_list)
pylab.title(title)
pylab.colorbar()
pylab.grid(False)
pylab.show()
pylab.xlabel('Predicted class')
pylab.ylabel('True class')
pylab.grid(False)
pylab.savefig(
os.path.join(CHART_DIR, "confusion_matrix_%s.png" % name), bbox_inches="tight")
def plot_pr(auc_score, name, precision, recall, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.fill_between(recall, precision, alpha=0.5)
pylab.plot(recall, precision, lw=1)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('Recall')
pylab.ylabel('Precision')
pylab.title('P/R curve (AUC = %0.2f) / %s' % (auc_score, label))
filename = name.replace(" ", "_")
pylab.savefig(
os.path.join(CHART_DIR, "pr_" + filename + ".png"), bbox_inches="tight")
def plot_roc(auc_score, name, tpr, fpr, label=None):
pylab.clf()
pylab.figure(num=None, figsize=(5, 4))
pylab.grid(True)
pylab.plot([0, 1], [0, 1], 'k--')
pylab.plot(fpr, tpr)
pylab.fill_between(fpr, tpr, alpha=0.5)
pylab.xlim([0.0, 1.0])
pylab.ylim([0.0, 1.0])
pylab.xlabel('False Positive Rate')
pylab.ylabel('True Positive Rate')
pylab.title('ROC curve (AUC = %0.2f) / %s' %
(auc_score, label), verticalalignment="bottom")
pylab.legend(loc="lower right")
filename = name.replace(" ", "_")
pylab.savefig(
os.path.join(CHART_DIR, "roc_" + filename + ".png"), bbox_inches="tight")
def show_most_informative_features(vectorizer, clf, n=20):
c_f = sorted(zip(clf.coef_[0], vectorizer.get_feature_names()))
top = zip(c_f[:n], c_f[:-(n + 1):-1])
for (c1, f1), (c2, f2) in top:
print("\t%.4f\t%-15s\t\t%.4f\t%-15s" % (c1, f1, c2, f2))
def plot_log():
pylab.clf()
x = np.arange(0.001, 1, 0.001)
y = np.log(x)
pylab.title('Relationship between probabilities and their logarithm')
pylab.plot(x, y)
pylab.grid(True)
pylab.xlabel('P')
pylab.ylabel('log(P)')
filename = 'log_probs.png'
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_feat_importance(feature_names, clf, name):
pylab.clf()
coef_ = clf.coef_
important = np.argsort(np.absolute(coef_.ravel()))
f_imp = feature_names[important]
coef = coef_.ravel()[important]
inds = np.argsort(coef)
f_imp = f_imp[inds]
coef = coef[inds]
xpos = np.array(range(len(coef)))
pylab.bar(xpos, coef, width=1)
pylab.title('Feature importance for %s' % (name))
ax = pylab.gca()
ax.set_xticks(np.arange(len(coef)))
labels = ax.set_xticklabels(f_imp)
for label in labels:
label.set_rotation(90)
filename = name.replace(" ", "_")
pylab.savefig(os.path.join(
CHART_DIR, "feat_imp_%s.png" % filename), bbox_inches="tight")
def plot_feat_hist(data_name_list, filename=None):
pylab.clf()
num_rows = 1 + (len(data_name_list) - 1) / 2
num_cols = 1 if len(data_name_list) == 1 else 2
pylab.figure(figsize=(5 * num_cols, 4 * num_rows))
for i in range(num_rows):
for j in range(num_cols):
pylab.subplot(num_rows, num_cols, 1 + i * num_cols + j)
x, name = data_name_list[i * num_cols + j]
pylab.title(name)
pylab.xlabel('Value')
pylab.ylabel('Density')
# the histogram of the data
max_val = np.max(x)
if max_val <= 1.0:
bins = 50
elif max_val > 50:
bins = 50
else:
bins = max_val
n, bins, patches = pylab.hist(
x, bins=bins, normed=1, facecolor='green', alpha=0.75)
pylab.grid(True)
if not filename:
filename = "feat_hist_%s.png" % name
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_bias_variance(data_sizes, train_errors, test_errors, name):
pylab.clf()
pylab.ylim([0.0, 1.0])
pylab.xlabel('Data set size')
pylab.ylabel('Error')
pylab.title("Bias-Variance for '%s'" % name)
pylab.plot(
data_sizes, train_errors, "-", data_sizes, test_errors, "--", lw=1)
pylab.legend(["train error", "test error"], loc="upper right")
pylab.grid(True)
pylab.savefig(os.path.join(CHART_DIR, "bv_" + name + ".png"))
| mit |
YihaoLu/statsmodels | statsmodels/regression/tests/test_lme.py | 19 | 25081 |
import warnings
import numpy as np
import pandas as pd
from statsmodels.regression.mixed_linear_model import MixedLM, MixedLMParams
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
dec, assert_)
from . import lme_r_results
from statsmodels.base import _penalties as penalties
import statsmodels.tools.numdiff as nd
import os
import csv
# TODO: add tests with unequal group sizes
class R_Results(object):
"""
A class for holding various results obtained from fitting one data
set using lmer in R.
Parameters
----------
meth : string
Either "ml" or "reml".
irfs : string
Either "irf", for independent random effects, or "drf" for
dependent random effects.
ds_ix : integer
The number of the data set
"""
def __init__(self, meth, irfs, ds_ix):
bname = "_%s_%s_%d" % (meth, irfs, ds_ix)
self.coef = getattr(lme_r_results, "coef" + bname)
self.vcov_r = getattr(lme_r_results, "vcov" + bname)
self.cov_re_r = getattr(lme_r_results, "cov_re" + bname)
self.scale_r = getattr(lme_r_results, "scale" + bname)
self.loglike = getattr(lme_r_results, "loglike" + bname)
if hasattr(lme_r_results, "ranef_mean" + bname):
self.ranef_postmean = getattr(lme_r_results, "ranef_mean"
+ bname)
self.ranef_condvar = getattr(lme_r_results,
"ranef_condvar" + bname)
self.ranef_condvar = np.atleast_2d(self.ranef_condvar)
# Load the data file
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fname = os.path.join(rdir, "lme%02d.csv" % ds_ix)
fid = open(fname)
rdr = csv.reader(fid)
header = next(rdr)
data = [[float(x) for x in line] for line in rdr]
data = np.asarray(data)
# Split into exog, endog, etc.
self.endog = data[:,header.index("endog")]
self.groups = data[:,header.index("groups")]
ii = [i for i,x in enumerate(header) if
x.startswith("exog_fe")]
self.exog_fe = data[:,ii]
ii = [i for i,x in enumerate(header) if
x.startswith("exog_re")]
self.exog_re = data[:,ii]
def loglike_function(model, profile_fe, has_fe):
"""
Returns a function that evaluates the negative log-likelihood for
the given model.
"""
def f(x):
params = MixedLMParams.from_packed(x, model.k_fe, model.k_re, model.use_sqrt, has_fe=has_fe)
return -model.loglike(params, profile_fe=profile_fe)
return f
def score_function(model, profile_fe, has_fe):
"""
Returns a function that evaluates the negative score function for
the given model.
"""
def f(x):
params = MixedLMParams.from_packed(x, model.k_fe, model.use_sqrt, has_fe=not profile_fe)
return -model.score(params, profile_fe=profile_fe)
return f
class TestMixedLM(object):
# Test analytic scores and Hessian using numeric differentiation
@dec.slow
def test_compare_numdiff(self):
n_grp = 200
grpsize = 5
k_fe = 3
k_re = 2
for use_sqrt in False,True:
for reml in False,True:
for profile_fe in False,True:
np.random.seed(3558)
exog_fe = np.random.normal(size=(n_grp*grpsize, k_fe))
exog_re = np.random.normal(size=(n_grp*grpsize, k_re))
exog_re[:, 0] = 1
exog_vc = np.random.normal(size=(n_grp*grpsize, 3))
slopes = np.random.normal(size=(n_grp, k_re))
slopes[:, -1] *= 2
slopes = np.kron(slopes, np.ones((grpsize,1)))
slopes_vc = np.random.normal(size=(n_grp, 3))
slopes_vc = np.kron(slopes_vc, np.ones((grpsize,1)))
slopes_vc[:, -1] *= 2
re_values = (slopes * exog_re).sum(1)
vc_values = (slopes_vc * exog_vc).sum(1)
err = np.random.normal(size=n_grp*grpsize)
endog = exog_fe.sum(1) + re_values + vc_values + err
groups = np.kron(range(n_grp), np.ones(grpsize))
vc = {"a": {}, "b": {}}
for i in range(n_grp):
ix = np.flatnonzero(groups == i)
vc["a"][i] = exog_vc[ix, 0:2]
vc["b"][i] = exog_vc[ix, 2:3]
model = MixedLM(endog, exog_fe, groups, exog_re, exog_vc=vc, use_sqrt=use_sqrt)
rslt = model.fit(reml=reml)
loglike = loglike_function(model, profile_fe=profile_fe, has_fe=not profile_fe)
score = score_function(model, profile_fe=profile_fe, has_fe=not profile_fe)
# Test the score at several points.
for kr in range(5):
fe_params = np.random.normal(size=k_fe)
cov_re = np.random.normal(size=(k_re, k_re))
cov_re = np.dot(cov_re.T, cov_re)
vcomp = np.random.normal(size=2)**2
params = MixedLMParams.from_components(fe_params, cov_re=cov_re, vcomp=vcomp)
params_vec = params.get_packed(has_fe=not profile_fe, use_sqrt=use_sqrt)
# Check scores
gr = -model.score(params, profile_fe=profile_fe)
ngr = nd.approx_fprime(params_vec, loglike)
assert_allclose(gr, ngr, rtol=1e-3)
# Check Hessian matrices at the MLE (we don't have
# the profile Hessian matrix and we don't care
# about the Hessian for the square root
# transformed parameter).
if (profile_fe == False) and (use_sqrt == False):
hess = -model.hessian(rslt.params_object)
params_vec = rslt.params_object.get_packed(use_sqrt=False, has_fe=True)
loglike_h = loglike_function(model, profile_fe=False, has_fe=True)
nhess = nd.approx_hess(params_vec, loglike_h)
assert_allclose(hess, nhess, rtol=1e-3)
def test_default_re(self):
np.random.seed(3235)
exog = np.random.normal(size=(300,4))
groups = np.kron(np.arange(100), [1,1,1])
g_errors = np.kron(np.random.normal(size=100), [1,1,1])
endog = exog.sum(1) + g_errors + np.random.normal(size=300)
mdf1 = MixedLM(endog, exog, groups).fit()
mdf2 = MixedLM(endog, exog, groups, np.ones(300)).fit()
assert_almost_equal(mdf1.params, mdf2.params, decimal=8)
def test_history(self):
np.random.seed(3235)
exog = np.random.normal(size=(300,4))
groups = np.kron(np.arange(100), [1,1,1])
g_errors = np.kron(np.random.normal(size=100), [1,1,1])
endog = exog.sum(1) + g_errors + np.random.normal(size=300)
mod = MixedLM(endog, exog, groups)
rslt = mod.fit(full_output=True)
assert_equal(hasattr(rslt, "hist"), True)
def test_profile_inference(self):
# Smoke test
np.random.seed(9814)
k_fe = 2
gsize = 3
n_grp = 100
exog = np.random.normal(size=(n_grp * gsize, k_fe))
exog_re = np.ones((n_grp * gsize, 1))
groups = np.kron(np.arange(n_grp), np.ones(gsize))
vca = np.random.normal(size=n_grp * gsize)
vcb = np.random.normal(size=n_grp * gsize)
errors = 0
g_errors = np.kron(np.random.normal(size=100), np.ones(gsize))
errors += g_errors + exog_re[:, 0]
rc = np.random.normal(size=n_grp)
errors += np.kron(rc, np.ones(gsize)) * vca
rc = np.random.normal(size=n_grp)
errors += np.kron(rc, np.ones(gsize)) * vcb
errors += np.random.normal(size=n_grp * gsize)
endog = exog.sum(1) + errors
vc = {"a" : {}, "b" : {}}
for k in range(n_grp):
ii = np.flatnonzero(groups == k)
vc["a"][k] = vca[ii][:, None]
vc["b"][k] = vcb[ii][:, None]
rslt = MixedLM(endog, exog, groups=groups, exog_re=exog_re, exog_vc=vc).fit()
prof_re = rslt.profile_re(0, vtype='re', dist_low=1, num_low=3, dist_high=1,
num_high=3)
prof_vc = rslt.profile_re('b', vtype='vc', dist_low=0.5, num_low=3, dist_high=0.5,
num_high=3)
# Fails on old versions of scipy/numpy
def txest_vcomp_1(self):
"""
Fit the same model using constrained random effects and variance components.
"""
np.random.seed(4279)
exog = np.random.normal(size=(400, 1))
exog_re = np.random.normal(size=(400, 2))
groups = np.kron(np.arange(100), np.ones(4))
slopes = np.random.normal(size=(100, 2))
slopes[:, 1] *= 2
slopes = np.kron(slopes, np.ones((4, 1))) * exog_re
errors = slopes.sum(1) + np.random.normal(size=400)
endog = exog.sum(1) + errors
free = MixedLMParams(1, 2, 0)
free.fe_params = np.ones(1)
free.cov_re = np.eye(2)
free.vcomp = np.zeros(0)
model1 = MixedLM(endog, exog, groups, exog_re=exog_re)
result1 = model1.fit(free=free)
exog_vc = {"a": {}, "b": {}}
        for group in model1.group_labels:
ix = model1.row_indices[group]
exog_vc["a"][group] = exog_re[ix, 0:1]
exog_vc["b"][group] = exog_re[ix, 1:2]
model2 = MixedLM(endog, exog, groups, exog_vc=exog_vc)
result2 = model2.fit()
result2.summary()
assert_allclose(result1.fe_params, result2.fe_params, atol=1e-4)
assert_allclose(np.diag(result1.cov_re), result2.vcomp, atol=1e-2, rtol=1e-4)
assert_allclose(result1.bse[[0, 1, 3]], result2.bse, atol=1e-2, rtol=1e-2)
def test_vcomp_2(self):
"""
Simulated data comparison to R
"""
np.random.seed(6241)
n = 1600
exog = np.random.normal(size=(n, 2))
ex_vc = []
        groups = np.kron(np.arange(n // 16), np.ones(16))
# Build up the random error vector
errors = 0
# The random effects
exog_re = np.random.normal(size=(n, 2))
        slopes = np.random.normal(size=(n // 16, 2))
slopes = np.kron(slopes, np.ones((16, 1))) * exog_re
errors += slopes.sum(1)
# First variance component
        subgroups1 = np.kron(np.arange(n // 4), np.ones(4))
        errors += np.kron(2*np.random.normal(size=n // 4), np.ones(4))
        # Second variance component
        subgroups2 = np.kron(np.arange(n // 2), np.ones(2))
        errors += np.kron(2*np.random.normal(size=n // 2), np.ones(2))
# iid errors
errors += np.random.normal(size=n)
endog = exog.sum(1) + errors
df = pd.DataFrame(index=range(n))
df["y"] = endog
df["groups"] = groups
df["x1"] = exog[:, 0]
df["x2"] = exog[:, 1]
df["z1"] = exog_re[:, 0]
df["z2"] = exog_re[:, 1]
df["v1"] = subgroups1
df["v2"] = subgroups2
# Equivalent model in R:
# df.to_csv("tst.csv")
# model = lmer(y ~ x1 + x2 + (0 + z1 + z2 | groups) + (1 | v1) + (1 | v2), df)
vcf = {"a": "0 + C(v1)", "b": "0 + C(v2)"}
model1 = MixedLM.from_formula("y ~ x1 + x2", groups=groups, re_formula="0+z1+z2",
vc_formula=vcf, data=df)
result1 = model1.fit()
# Compare to R
assert_allclose(result1.fe_params, [0.16527, 0.99911, 0.96217], rtol=1e-4)
assert_allclose(result1.cov_re, [[1.244, 0.146], [0.146 , 1.371]], rtol=1e-3)
assert_allclose(result1.vcomp, [4.024, 3.997], rtol=1e-3)
assert_allclose(result1.bse.iloc[0:3], [0.12610, 0.03938, 0.03848], rtol=1e-3)
def test_sparse(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fname = os.path.join(rdir, 'pastes.csv')
# Dense
data = pd.read_csv(fname)
vcf = {"cask" : "0 + cask"}
model = MixedLM.from_formula("strength ~ 1", groups="batch",
re_formula="1", vc_formula=vcf,
data=data)
result = model.fit()
# Sparse
from scipy import sparse
model2 = MixedLM.from_formula("strength ~ 1", groups="batch",
re_formula="1", vc_formula=vcf,
use_sparse=True, data=data)
        result2 = model2.fit()
assert_allclose(result.params, result2.params)
assert_allclose(result.bse, result2.bse)
def test_pastes_vcomp(self):
"""
pastes data from lme4
Fit in R using formula:
strength ~ (1|batch) + (1|batch:cask)
"""
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fname = os.path.join(rdir, 'pastes.csv')
# REML
data = pd.read_csv(fname)
vcf = {"cask" : "0 + cask"}
model = MixedLM.from_formula("strength ~ 1", groups="batch",
re_formula="1", vc_formula=vcf,
data=data)
result = model.fit()
assert_allclose(result.fe_params.iloc[0], 60.0533, rtol=1e-3)
assert_allclose(result.bse.iloc[0], 0.6769, rtol=1e-3)
assert_allclose(result.cov_re.iloc[0, 0], 1.657, rtol=1e-3)
assert_allclose(result.scale, 0.678, rtol=1e-3)
assert_allclose(result.llf, -123.49, rtol=1e-1)
assert_equal(result.aic, np.nan) # don't provide aic/bic with REML
assert_equal(result.bic, np.nan)
resid = np.r_[0.17133538, -0.02866462, -1.08662875, 1.11337125, -0.12093607]
assert_allclose(result.resid[0:5], resid, rtol=1e-3)
fit = np.r_[62.62866, 62.62866, 61.18663, 61.18663, 62.82094]
assert_allclose(result.fittedvalues[0:5], fit, rtol=1e-4)
# ML
data = pd.read_csv(fname)
vcf = {"cask" : "0 + cask"}
model = MixedLM.from_formula("strength ~ 1", groups="batch",
re_formula="1", vc_formula=vcf,
data=data)
result = model.fit(reml=False)
assert_allclose(result.fe_params.iloc[0], 60.0533, rtol=1e-3)
assert_allclose(result.bse.iloc[0], 0.642, rtol=1e-3)
assert_allclose(result.cov_re.iloc[0, 0], 1.199, rtol=1e-3)
assert_allclose(result.scale, 0.67799, rtol=1e-3)
assert_allclose(result.llf, -123.997, rtol=1e-1)
assert_allclose(result.aic, 255.9944, rtol=1e-3)
assert_allclose(result.bic, 264.3718, rtol=1e-3)
def test_vcomp_formula(self):
np.random.seed(6241)
n = 800
exog = np.random.normal(size=(n, 2))
exog[:, 0] = 1
ex_vc = []
        groups = np.kron(np.arange(n // 4), np.ones(4))
errors = 0
exog_re = np.random.normal(size=(n, 2))
        slopes = np.random.normal(size=(n // 4, 2))
slopes = np.kron(slopes, np.ones((4, 1))) * exog_re
errors += slopes.sum(1)
ex_vc = np.random.normal(size=(n, 4))
        slopes = np.random.normal(size=(n // 4, 4))
slopes[:, 2:] *= 2
slopes = np.kron(slopes, np.ones((4, 1))) * ex_vc
errors += slopes.sum(1)
errors += np.random.normal(size=n)
endog = exog.sum(1) + errors
exog_vc = {"a": {}, "b": {}}
        for group in range(n // 4):
ix = np.flatnonzero(groups == group)
exog_vc["a"][group] = ex_vc[ix, 0:2]
exog_vc["b"][group] = ex_vc[ix, 2:]
model1 = MixedLM(endog, exog, groups, exog_re=exog_re, exog_vc=exog_vc)
result1 = model1.fit()
df = pd.DataFrame(exog[:, 1:], columns=["x1",])
df["y"] = endog
df["re1"] = exog_re[:, 0]
df["re2"] = exog_re[:, 1]
df["vc1"] = ex_vc[:, 0]
df["vc2"] = ex_vc[:, 1]
df["vc3"] = ex_vc[:, 2]
df["vc4"] = ex_vc[:, 3]
vc_formula = {"a": "0 + vc1 + vc2", "b": "0 + vc3 + vc4"}
model2 = MixedLM.from_formula("y ~ x1", groups=groups, re_formula="0 + re1 + re2",
vc_formula=vc_formula, data=df)
result2 = model2.fit()
assert_allclose(result1.fe_params, result2.fe_params, rtol=1e-8)
assert_allclose(result1.cov_re, result2.cov_re, rtol=1e-8)
assert_allclose(result1.vcomp, result2.vcomp, rtol=1e-8)
assert_allclose(result1.params, result2.params, rtol=1e-8)
assert_allclose(result1.bse, result2.bse, rtol=1e-8)
def test_formulas(self):
np.random.seed(2410)
exog = np.random.normal(size=(300,4))
exog_re = np.random.normal(size=300)
groups = np.kron(np.arange(100), [1,1,1])
g_errors = exog_re * np.kron(np.random.normal(size=100),
[1,1,1])
endog = exog.sum(1) + g_errors + np.random.normal(size=300)
mod1 = MixedLM(endog, exog, groups, exog_re)
# test the names
assert_(mod1.data.xnames == ["x1", "x2", "x3", "x4"])
assert_(mod1.data.exog_re_names == ["Z1"])
assert_(mod1.data.exog_re_names_full == ["Z1 RE"])
rslt1 = mod1.fit()
# Fit with a formula, passing groups as the actual values.
df = pd.DataFrame({"endog": endog})
for k in range(exog.shape[1]):
df["exog%d" % k] = exog[:,k]
df["exog_re"] = exog_re
fml = "endog ~ 0 + exog0 + exog1 + exog2 + exog3"
re_fml = "0 + exog_re"
mod2 = MixedLM.from_formula(fml, df, re_formula=re_fml,
groups=groups)
assert_(mod2.data.xnames == ["exog0", "exog1", "exog2", "exog3"])
assert_(mod2.data.exog_re_names == ["exog_re"])
assert_(mod2.data.exog_re_names_full == ["exog_re RE"])
rslt2 = mod2.fit()
assert_almost_equal(rslt1.params, rslt2.params)
# Fit with a formula, passing groups as the variable name.
df["groups"] = groups
mod3 = MixedLM.from_formula(fml, df, re_formula=re_fml,
groups="groups")
assert_(mod3.data.xnames == ["exog0", "exog1", "exog2", "exog3"])
assert_(mod3.data.exog_re_names == ["exog_re"])
assert_(mod3.data.exog_re_names_full == ["exog_re RE"])
rslt3 = mod3.fit(start_params=rslt2.params)
assert_allclose(rslt1.params, rslt3.params, rtol=1e-4)
# Check default variance structure with non-formula model
# creation.
exog_re = np.ones(len(endog), dtype=np.float64)
mod4 = MixedLM(endog, exog, groups, exog_re)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
rslt4 = mod4.fit(start_params=rslt2.params)
from statsmodels.formula.api import mixedlm
mod5 = mixedlm(fml, df, groups="groups")
assert_(mod5.data.exog_re_names == ["groups"])
assert_(mod5.data.exog_re_names_full == ["groups RE"])
rslt5 = mod5.fit(start_params=rslt2.params)
assert_almost_equal(rslt4.params, rslt5.params)
def test_regularized(self):
np.random.seed(3453)
exog = np.random.normal(size=(400,5))
groups = np.kron(np.arange(100), np.ones(4))
expected_endog = exog[:,0] - exog[:,2]
endog = expected_endog +\
np.kron(np.random.normal(size=100), np.ones(4)) +\
np.random.normal(size=400)
# L1 regularization
md = MixedLM(endog, exog, groups)
mdf1 = md.fit_regularized(alpha=1.)
mdf1.summary()
# L1 regularization
md = MixedLM(endog, exog, groups)
mdf2 = md.fit_regularized(alpha=10*np.ones(5))
mdf2.summary()
# L2 regularization
pen = penalties.L2()
mdf3 = md.fit_regularized(method=pen, alpha=0.)
mdf3.summary()
# L2 regularization
pen = penalties.L2()
mdf4 = md.fit_regularized(method=pen, alpha=100.)
mdf4.summary()
# Pseudo-Huber regularization
pen = penalties.PseudoHuber(0.3)
mdf4 = md.fit_regularized(method=pen, alpha=1.)
mdf4.summary()
def do1(self, reml, irf, ds_ix):
# No need to check independent random effects when there is
# only one of them.
if irf and ds_ix < 6:
return
irfs = "irf" if irf else "drf"
meth = "reml" if reml else "ml"
rslt = R_Results(meth, irfs, ds_ix)
# Fit the model
md = MixedLM(rslt.endog, rslt.exog_fe, rslt.groups,
rslt.exog_re)
if not irf: # Free random effects covariance
mdf = md.fit(gtol=1e-7, reml=reml)
else: # Independent random effects
k_fe = rslt.exog_fe.shape[1]
k_re = rslt.exog_re.shape[1]
free = MixedLMParams(k_fe, k_re, 0)
free.fe_params = np.ones(k_fe)
free.cov_re = np.eye(k_re)
free.vcomp = np.array([])
mdf = md.fit(reml=reml, gtol=1e-7, free=free)
assert_almost_equal(mdf.fe_params, rslt.coef, decimal=4)
assert_almost_equal(mdf.cov_re, rslt.cov_re_r, decimal=4)
assert_almost_equal(mdf.scale, rslt.scale_r, decimal=4)
k_fe = md.k_fe
assert_almost_equal(rslt.vcov_r, mdf.cov_params()[0:k_fe,0:k_fe],
decimal=3)
assert_almost_equal(mdf.llf, rslt.loglike[0], decimal=2)
# Not supported in R except for independent random effects
if not irf:
assert_almost_equal(mdf.random_effects[0], rslt.ranef_postmean,
decimal=3)
assert_almost_equal(mdf.random_effects_cov[0],
rslt.ranef_condvar,
decimal=3)
# Run all the tests against R
def test_r(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fnames = os.listdir(rdir)
fnames = [x for x in fnames if x.startswith("lme")
and x.endswith(".csv")]
for fname in fnames:
for reml in False,True:
for irf in False,True:
ds_ix = int(fname[3:5])
yield self.do1, reml, irf, ds_ix
def test_mixed_lm_wrapper():
# a bit more complicated model to test
np.random.seed(2410)
exog = np.random.normal(size=(300, 4))
exog_re = np.random.normal(size=300)
groups = np.kron(np.arange(100), [1, 1, 1])
g_errors = exog_re * np.kron(np.random.normal(size=100),
[1, 1, 1])
endog = exog.sum(1) + g_errors + np.random.normal(size=300)
# Fit with a formula, passing groups as the actual values.
df = pd.DataFrame({"endog": endog})
for k in range(exog.shape[1]):
df["exog%d" % k] = exog[:, k]
df["exog_re"] = exog_re
fml = "endog ~ 0 + exog0 + exog1 + exog2 + exog3"
re_fml = "~ exog_re"
mod2 = MixedLM.from_formula(fml, df, re_formula=re_fml,
groups=groups)
result = mod2.fit()
smoke = result.summary()
xnames = ["exog0", "exog1", "exog2", "exog3"]
re_names = ["Intercept", "exog_re"]
re_names_full = ["Intercept RE", "Intercept RE x exog_re RE",
"exog_re RE"]
assert_(mod2.data.xnames == xnames)
assert_(mod2.data.exog_re_names == re_names)
assert_(mod2.data.exog_re_names_full == re_names_full)
params = result.params
assert_(params.index.tolist() == xnames + re_names_full)
bse = result.bse
assert_(bse.index.tolist() == xnames + re_names_full)
tvalues = result.tvalues
assert_(tvalues.index.tolist() == xnames + re_names_full)
cov_params = result.cov_params()
assert_(cov_params.index.tolist() == xnames + re_names_full)
assert_(cov_params.columns.tolist() == xnames + re_names_full)
fe = result.fe_params
assert_(fe.index.tolist() == xnames)
bse_fe = result.bse_fe
assert_(bse_fe.index.tolist() == xnames)
cov_re = result.cov_re
assert_(cov_re.index.tolist() == re_names)
assert_(cov_re.columns.tolist() == re_names)
cov_re_u = result.cov_re_unscaled
assert_(cov_re_u.index.tolist() == re_names)
assert_(cov_re_u.columns.tolist() == re_names)
bse_re = result.bse_re
assert_(bse_re.index.tolist() == re_names_full)
if __name__=="__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
|
bsd-3-clause
|
BlackArbsCEO/trading-with-python
|
cookbook/workingWithDatesAndTime.py
|
77
|
1551
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 17:45:02 2011
@author: jev
"""
import time
import datetime as dt
from pandas import *
from pandas.core import datetools
# basic functions
print 'Epoch start: %s' % time.asctime(time.gmtime(0))
print 'Seconds from epoch: %.2f' % time.time()
today = dt.date.today()
print type(today)
print 'Today is %s' % today.strftime('%Y.%m.%d')
# parse datetime
d = dt.datetime.strptime('20120803 21:59:59',"%Y%m%d %H:%M:%S")
# time deltas
someDate = dt.date(2011,8,1)
delta = today - someDate
print 'Delta :', delta
# calculate difference in dates
delta = dt.timedelta(days=20)
print 'Today-delta=', today-delta
t = dt.datetime(*time.strptime('3/30/2004',"%m/%d/%Y")[0:5])
# the '*' operator unpacks the tuple, producing the argument list.
print t
# print every 3d wednesday of the month
for month in xrange(1,13):
t = dt.date(2013,month,1)+datetools.relativedelta(months=1)
offset = datetools.Week(weekday=4)
    if t.weekday() != 4:
t_new = t+3*offset
else:
t_new = t+2*offset
t_new = t_new-datetools.relativedelta(days=30)
print t_new.strftime("%B, %d %Y (%A)")
#rng = DateRange(t, t+datetools.YearEnd())
#print rng
# create a range of times
start = dt.datetime(2012,8,1)+datetools.relativedelta(hours=9,minutes=30)
end = dt.datetime(2012,8,1)+datetools.relativedelta(hours=22)
rng = date_range(start,end,freq='30min')
for r in rng: print r.strftime("%Y%m%d %H:%M:%S")
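# Hedged aside (not part of the original cookbook; assumes pandas >= 0.13):
# the "third Wednesday" loop above can be written directly with date_range
# using the week-of-month frequency alias 'WOM-3WED'.
thirdWednesdays = date_range('2013-01-01', '2013-12-31', freq='WOM-3WED')
for r in thirdWednesdays: print r.strftime("%B, %d %Y (%A)")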
|
bsd-3-clause
|
DinoCow/airflow
|
tests/providers/salesforce/hooks/test_salesforce.py
|
7
|
9659
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest.mock import Mock, patch
import pandas as pd
from numpy import nan
from simple_salesforce import Salesforce
from airflow.models.connection import Connection
from airflow.providers.salesforce.hooks.salesforce import SalesforceHook
class TestSalesforceHook(unittest.TestCase):
def setUp(self):
self.salesforce_hook = SalesforceHook(conn_id="conn_id")
def test_get_conn_exists(self):
self.salesforce_hook.conn = Mock(spec=Salesforce)
self.salesforce_hook.get_conn()
self.assertIsNotNone(self.salesforce_hook.conn.return_value)
@patch(
"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.get_connection",
return_value=Connection(
login="username", password="password", extra='{"security_token": "token", "domain": "test"}'
),
)
@patch("airflow.providers.salesforce.hooks.salesforce.Salesforce")
def test_get_conn(self, mock_salesforce, mock_get_connection):
self.salesforce_hook.get_conn()
self.assertEqual(self.salesforce_hook.conn, mock_salesforce.return_value)
mock_salesforce.assert_called_once_with(
username=mock_get_connection.return_value.login,
password=mock_get_connection.return_value.password,
security_token=mock_get_connection.return_value.extra_dejson["security_token"],
instance_url=mock_get_connection.return_value.host,
domain=mock_get_connection.return_value.extra_dejson.get("domain"),
)
@patch("airflow.providers.salesforce.hooks.salesforce.Salesforce")
def test_make_query(self, mock_salesforce):
mock_salesforce.return_value.query_all.return_value = dict(totalSize=123, done=True)
self.salesforce_hook.conn = mock_salesforce.return_value
query = "SELECT * FROM table"
query_results = self.salesforce_hook.make_query(query, include_deleted=True)
mock_salesforce.return_value.query_all.assert_called_once_with(query, include_deleted=True)
self.assertEqual(query_results, mock_salesforce.return_value.query_all.return_value)
@patch("airflow.providers.salesforce.hooks.salesforce.Salesforce")
def test_describe_object(self, mock_salesforce):
obj = "obj_name"
mock_salesforce.return_value.__setattr__(obj, Mock(spec=Salesforce))
self.salesforce_hook.conn = mock_salesforce.return_value
obj_description = self.salesforce_hook.describe_object(obj)
mock_salesforce.return_value.__getattr__(obj).describe.assert_called_once_with()
self.assertEqual(obj_description, mock_salesforce.return_value.__getattr__(obj).describe.return_value)
@patch("airflow.providers.salesforce.hooks.salesforce.SalesforceHook.get_conn")
@patch(
"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.describe_object",
return_value={"fields": [{"name": "field_1"}, {"name": "field_2"}]},
)
def test_get_available_fields(self, mock_describe_object, mock_get_conn):
obj = "obj_name"
available_fields = self.salesforce_hook.get_available_fields(obj)
mock_get_conn.assert_called_once_with()
mock_describe_object.assert_called_once_with(obj)
self.assertEqual(available_fields, ["field_1", "field_2"])
@patch("airflow.providers.salesforce.hooks.salesforce.SalesforceHook.make_query")
def test_get_object_from_salesforce(self, mock_make_query):
salesforce_objects = self.salesforce_hook.get_object_from_salesforce(
obj="obj_name", fields=["field_1", "field_2"]
)
mock_make_query.assert_called_once_with("SELECT field_1,field_2 FROM obj_name")
self.assertEqual(salesforce_objects, mock_make_query.return_value)
def test_write_object_to_file_invalid_format(self):
with self.assertRaises(ValueError):
self.salesforce_hook.write_object_to_file(query_results=[], filename="test", fmt="test")
@patch(
"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records",
return_value=pd.DataFrame({"test": [1, 2, 3], "dict": [nan, nan, {"foo": "bar"}]}),
)
def test_write_object_to_file_csv(self, mock_data_frame):
mock_data_frame.return_value.to_csv = Mock()
filename = "test"
data_frame = self.salesforce_hook.write_object_to_file(query_results=[], filename=filename, fmt="csv")
mock_data_frame.return_value.to_csv.assert_called_once_with(filename, index=False)
# Note that the latest version of pandas dataframes (1.1.2) returns "nan" rather than "None" here
pd.testing.assert_frame_equal(
data_frame,
pd.DataFrame({"test": [1, 2, 3], "dict": ["nan", "nan", str({"foo": "bar"})]}),
check_index_type=False,
)
@patch(
"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.describe_object",
return_value={"fields": [{"name": "field_1", "type": "date"}]},
)
@patch(
"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records",
return_value=pd.DataFrame({"test": [1, 2, 3], "field_1": ["2019-01-01", "2019-01-02", "2019-01-03"]}),
)
def test_write_object_to_file_json_with_timestamp_conversion(self, mock_data_frame, mock_describe_object):
mock_data_frame.return_value.to_json = Mock()
filename = "test"
obj_name = "obj_name"
data_frame = self.salesforce_hook.write_object_to_file(
query_results=[{"attributes": {"type": obj_name}}],
filename=filename,
fmt="json",
coerce_to_timestamp=True,
)
mock_describe_object.assert_called_once_with(obj_name)
mock_data_frame.return_value.to_json.assert_called_once_with(filename, "records", date_unit="s")
pd.testing.assert_frame_equal(
data_frame, pd.DataFrame({"test": [1, 2, 3], "field_1": [1.546301e09, 1.546387e09, 1.546474e09]})
)
@patch("airflow.providers.salesforce.hooks.salesforce.time.time", return_value=1.23)
@patch(
"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records",
return_value=pd.DataFrame({"test": [1, 2, 3]}),
)
def test_write_object_to_file_ndjson_with_record_time(self, mock_data_frame, mock_time):
mock_data_frame.return_value.to_json = Mock()
filename = "test"
data_frame = self.salesforce_hook.write_object_to_file(
query_results=[], filename=filename, fmt="ndjson", record_time_added=True
)
mock_data_frame.return_value.to_json.assert_called_once_with(
filename, "records", lines=True, date_unit="s"
)
pd.testing.assert_frame_equal(
data_frame,
pd.DataFrame(
{
"test": [1, 2, 3],
"time_fetched_from_salesforce": [
mock_time.return_value,
mock_time.return_value,
mock_time.return_value,
],
}
),
)
@patch(
"airflow.providers.salesforce.hooks.salesforce.SalesforceHook.describe_object",
return_value={"fields": [{"name": "field_1", "type": "date"}]},
)
@patch(
"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records",
return_value=pd.DataFrame({"test": [1, 2, 3], "field_1": ["2019-01-01", "2019-01-02", "2019-01-03"]}),
)
    def test_object_to_df_with_timestamp_conversion(self, mock_data_frame, mock_describe_object):
obj_name = "obj_name"
data_frame = self.salesforce_hook.object_to_df(
query_results=[{"attributes": {"type": obj_name}}],
coerce_to_timestamp=True,
)
mock_describe_object.assert_called_once_with(obj_name)
pd.testing.assert_frame_equal(
data_frame, pd.DataFrame({"test": [1, 2, 3], "field_1": [1.546301e09, 1.546387e09, 1.546474e09]})
)
@patch("airflow.providers.salesforce.hooks.salesforce.time.time", return_value=1.23)
@patch(
"airflow.providers.salesforce.hooks.salesforce.pd.DataFrame.from_records",
return_value=pd.DataFrame({"test": [1, 2, 3]}),
)
def test_object_to_df_with_record_time(self, mock_data_frame, mock_time):
data_frame = self.salesforce_hook.object_to_df(query_results=[], record_time_added=True)
pd.testing.assert_frame_equal(
data_frame,
pd.DataFrame(
{
"test": [1, 2, 3],
"time_fetched_from_salesforce": [
mock_time.return_value,
mock_time.return_value,
mock_time.return_value,
],
}
),
)
|
apache-2.0
|
JakeColtman/bartpy
|
tests/test_split.py
|
1
|
3502
|
from operator import le, gt
import unittest
import pandas as pd
import numpy as np
from bartpy.data import Data, make_bartpy_data
from bartpy.split import SplitCondition, Split, CombinedCondition
class TestSplit(unittest.TestCase):
def test_null_split_returns_all_values(self):
data = make_bartpy_data(pd.DataFrame({"a": [1, 2]}).values, np.array([1, 2]))
split = Split(data)
conditioned_data = split.data
self.assertListEqual(list(data.X.get_column(0)), list(conditioned_data.X.get_column(0)))
def test_single_condition_data(self):
data = make_bartpy_data(pd.DataFrame({"a": [1, 2]}).values, np.array([1, 2]))
left_condition, right_condition = SplitCondition(0, 1, le), SplitCondition(0, 1, gt)
left_split, right_split = Split(data) + left_condition, Split(data) + right_condition
self.assertListEqual([1], list(left_split.data.X.get_column(0)))
self.assertListEqual([2], list(right_split.data.X.get_column(0)))
def test_combined_condition_data(self):
data = make_bartpy_data(pd.DataFrame({"a": [1, 2, 3, 4]}).values, np.array([1, 2, 1, 1]))
first_left_condition, first_right_condition = SplitCondition(0, 3, le), SplitCondition(0, 3, gt)
second_left_condition, second_right_condition = SplitCondition(0, 1, le), SplitCondition(0, 1, gt)
split = Split(data)
updated_split = split + first_left_condition + second_right_condition
conditioned_data = updated_split.data
self.assertListEqual([2, 3], list(conditioned_data.X.get_column(0)))
def test_most_recent_split(self):
data = make_bartpy_data(pd.DataFrame({"a": [1, 2, 3, 4]}).values, np.array([1, 2, 1, 1]))
first_left_condition, first_right_condition = SplitCondition(0, 3, le), SplitCondition(0, 3, gt)
second_left_condition, second_right_condition = SplitCondition(0, 1, le), SplitCondition(0, 1, gt)
split = Split(data)
updated_split = split + first_left_condition + second_right_condition
self.assertEqual((split + first_left_condition).most_recent_split_condition(), first_left_condition)
self.assertEqual(updated_split.most_recent_split_condition(), second_right_condition)
class TestCombinedCondition(unittest.TestCase):
def setUp(self):
self.X = np.array([1, 2, 4, 6, 3, 5]).reshape(6, 1)
def test_single_condition(self):
condition = SplitCondition(0, 3, gt)
combined_condition = CombinedCondition([0], [condition])
self.assertListEqual(list(combined_condition.condition(self.X)), [False, False, True, True, False, True])
def test_multiple_conditions(self):
conditions = [
SplitCondition(0, 2, gt),
SplitCondition(0, 5, le)
]
combined_condition = CombinedCondition([0], conditions)
self.assertEqual(combined_condition.variables[0].min_value, 2)
self.assertEqual(combined_condition.variables[0].max_value, 5)
self.assertListEqual(list(combined_condition.condition(self.X)), [False, False, True, False, True, True])
def test_multiple_variables(self):
conditions = [
SplitCondition(0, 2, gt),
SplitCondition(1, 1, gt)
]
X = self.X[:, 0].reshape(3, 2)
combined_condition = CombinedCondition([0, 1], conditions)
self.assertListEqual(list(combined_condition.condition(X)), [False, True, True])
if __name__ == '__main__':
unittest.main()
|
mit
|
kcavagnolo/astroML
|
book_figures/chapter5/fig_likelihood_gaussian.py
|
4
|
2628
|
"""
Log-likelihood for Gaussian Distribution
----------------------------------------
Figure 5.4
An illustration of the logarithm of the posterior probability density
function for :math:`\mu` and :math:`\sigma`, :math:`L_p(\mu,\sigma)`
(see eq. 5.58) for data drawn from a Gaussian distribution and N = 10, x = 1,
and V = 4. The maximum of :math:`L_p` is renormalized to 0, and color coded as
shown in the legend. The maximum value of :math:`L_p` is at :math:`\mu_0 = 1.0`
and :math:`\sigma_0 = 1.8`. The contours enclose the regions that contain
0.683, 0.955, and 0.997 of the cumulative (integrated) posterior probability.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.plotting.mcmc import convert_to_stdev
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def gauss_logL(xbar, V, n, sigma, mu):
"""Equation 5.57: gaussian likelihood"""
return (-(n + 1) * np.log(sigma)
- 0.5 * n * ((xbar - mu) ** 2 + V) / sigma ** 2)
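# Stated as a comment for clarity: up to an additive constant, the function
# above computes log L = -(n + 1) ln(sigma) - n[(xbar - mu)^2 + V] / (2 sigma^2),
# which is eq. 5.57 referenced in the docstring.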
#------------------------------------------------------------
# Define the grid and compute logL
sigma = np.linspace(1, 5, 70)
mu = np.linspace(-3, 5, 70)
xbar = 1
V = 4
n = 10
logL = gauss_logL(xbar, V, n, sigma[:, np.newaxis], mu)
logL -= logL.max()
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
plt.imshow(logL, origin='lower',
extent=(mu[0], mu[-1], sigma[0], sigma[-1]),
cmap=plt.cm.binary,
aspect='auto')
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)
plt.contour(mu, sigma, convert_to_stdev(logL),
levels=(0.683, 0.955, 0.997),
colors='k')
plt.text(0.5, 0.93, r'$L(\mu,\sigma)\ \mathrm{for}\ \bar{x}=1,\ V=4,\ n=10$',
bbox=dict(ec='k', fc='w', alpha=0.9),
ha='center', va='center', transform=plt.gca().transAxes)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.show()
|
bsd-2-clause
|
Garrett-R/scikit-learn
|
examples/feature_selection/plot_rfe_with_cross_validation.py
|
24
|
1384
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
import matplotlib.pyplot as plt
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
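# Hedged note (not part of the original example): in newer scikit-learn
# releases the splitter lives in sklearn.model_selection and is constructed
# without the labels, e.g.
# from sklearn.model_selection import StratifiedKFold
# rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2), scoring='accuracy')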
|
bsd-3-clause
|
TomAugspurger/dota
|
dota/helpers.py
|
1
|
3539
|
# -*- coding: utf-8 -*-
import re
import pathlib
from itertools import chain
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
import numpy as np
import pandas as pd
import dota.api as a
def cached_games(directory, regex=r"[\w\/]*?(\d+)\.json"):
"""
Return the match ids of all games.
Parameters
----------
directory : str or pathlib.Path
regex : str. Alternative regex. Used to match games
Returns
-------
match_ids : iterable of Paths
"""
    if not isinstance(directory, pathlib.Path):
        directory = pathlib.Path(directory)
regex = re.compile(regex)
match_ids = filter(lambda x: regex.match(str(x)), directory.iterdir())
return match_ids
def open_or_stringIO(f, as_string=False):
"""
    Open ``f`` as a file when it points to an existing path (and ``as_string``
    is False); otherwise wrap it in a StringIO. Mainly useful for testing.
"""
try:
p = pathlib.Path(f)
if p.exists() and not as_string:
return open(f)
else:
return StringIO(f)
except OSError:
return StringIO(f)
def pb_team_id(df, order=0):
return df.team_id_f.iloc[order]
def pb_opponent_id(df, order=0):
"""
Get the opponent id from a pick / ban Frame.
Parameters
----------
df : DataFrame
formatted like a pick / ban frame
order : int
        pick / ban order (0 .. 19)
Returns
-------
opponent_id : int
"""
x = df['team_id_f'].unique()
other_team = {x[0]: x[1], x[1]: x[0]}
return df.team_id_f.map(other_team).iloc[order]
def pb_previous_pbs(df, order=0):
"""
Get the hero id's for all prior picks and bans.
Parameters
----------
df : DataFrame
formatted like a pick / ban frame
order : int
        pick / ban order (0 .. 19)
Returns
-------
prior_pbs : Series
index labels are pick0, b0 ... or just order?
values are hero_id_f
"""
pbs = pd.DataFrame(df.hero_id_f.iloc[:order].values,
index=df.order.iloc[:order].values).T
pbs = pbs.rename(columns=lambda x: 'pb_' + str(x))
return pbs
def pb_only_complete_drafts(df):
"""
    Remove any matches where at least one team_id is NaN,
    or where the draft has fewer than 20 picks / bans.
"""
good_ids = (~pd.isnull(df['team_id'])).groupby(df['match_id']).all()
good_ids = good_ids[good_ids].index
full_drafts = df.groupby('match_id').apply(len)
full_drafts = full_drafts[full_drafts == 20].index
good_ids = good_ids & full_drafts
return df.query('match_id in @good_ids')
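# Hedged usage sketch for pb_only_complete_drafts (hypothetical data, not from
# the real API): a frame holding one full 20-row draft and one 3-row draft
# keeps only the complete match.
# demo = pd.DataFrame({"match_id": [1] * 20 + [2] * 3,
#                      "team_id": ([10, 20] * 10) + [10, 20, 10]})
# pb_only_complete_drafts(demo)  # -> only the 20 rows with match_id == 1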
#-----------------------------------------------------------------------------
# Feature extraction
def extract_hero_role():
"""
An array [n_heros x n_roles] with 1's if that hero is that role.
Notes
-----
I'm creating role_id to be an int from the roles in
roles = set(list(chain(*api._hero_roles.values())))
"""
# need to persist this to disk I think.
    # then update as needed.
by_hero = a._hero_roles
all_heroes = sorted(a._hero_names_to_id.keys())
n_heros = len(all_heroes)
roles = sorted(set(list(chain(*by_hero.values()))))
n_roles = len(roles)
df = pd.DataFrame(np.zeros(shape=(n_heros, n_roles)),
index=all_heroes,
columns=roles)
for hero, hero_roles in by_hero.items():
for role in hero_roles:
df.loc[hero, role] = 1
return df
|
mit
|
orlox/massive_bins_2015
|
2016_ULX/scripts/NSBH/kick_data/post_kick.py
|
1
|
7632
|
#!/usr/bin/env python
import numpy as np
from pylab import *
import matplotlib.pyplot as plt
from matplotlib import rc
rc('text', usetex=True)
import matplotlib.patches as patches
from scipy.interpolate import griddata
import math
import scipy
from matplotlib import ticker
import sys
import os
import mmap
import itertools
import matplotlib as mpl
import matplotlib.gridspec as grd
params = {'backend': 'pdf',
'figure.figsize': [4.3, 3.0],
'font.family':'serif',
'font.size':10,
'font.serif': 'Times Roman',
'axes.titlesize': 'medium',
'axes.labelsize': 'medium',
'legend.fontsize': 8,
'legend.frameon' : False,
'text.usetex': True,
'figure.dpi': 600,
'lines.markersize': 2,
'lines.linewidth': 3,
'lines.antialiased': False,
'path.simplify': False,
'legend.handlelength':3,
'figure.subplot.bottom':0.15,
'figure.subplot.top':0.9,
'figure.subplot.left':0.15,
'figure.subplot.right':0.9}
mpl.rcParams.update(params)
clight = 3e10
cgrav = 6.67e-8
msun = 2e33
WHITE = (1.00,1.00,1.00)
BLACK = (0.00,0.00,0.00)
ORANGE = (0.90,0.60,0.00)
SKY_BLUE = (0.35,0.70,0.90)
BLUE_GREEN = (0.00,0.60,0.50)
YELLOW = (0.95,0.90,0.25)
BLUE = (0.00,0.45,0.70)
VERMILLION = (0.80,0.40,0.00)
RED_PURPLE = (0.80,0.60,0.70)
#hexcols[0] dark bluish
#hexcols[1] light blue
#hexcols[2] greenish
#hexcols[3] dark green
#hexcols[4] brownish
#hexcols[5] light brown
#hexcols[6] pinkish
#hexcols[7] dark something redish
#hexcols[8] magentish
hexcols = ['#332288', '#88CCEE', '#44AA99', '#117733', '#999933', '#DDCC77',\
'#CC6677', '#882255', '#AA4499', '#661100', '#6699CC', '#AA4466','#4477AA']
def ngal(Mbh):
d = 2.26*(Mbh/10)**(5.0/6.0) # in Gpc
#if Mbh>130:
# print 4.0/3.0*math.pi*(d*1000)**3*(2.26)**(-3)*(0.0116)/1e10, d
return min(1e10,4.0/3.0*math.pi*(d*1000)**3*(2.26)**(-3)*(0.0116))
def clamp(val, minimum=0, maximum=255):
if val < minimum:
return minimum
if val > maximum:
return maximum
return val
def colorscale(hexstr, scalefactor):
"""
Scales a hex string by ``scalefactor``. Returns scaled hex string.
To darken the color, use a float value between 0 and 1.
To brighten the color, use a float value greater than 1.
>>> colorscale("#DF3C3C", .5)
#6F1E1E
>>> colorscale("#52D24F", 1.6)
#83FF7E
>>> colorscale("#4F75D2", 1)
#4F75D2
"""
hexstr = hexstr.strip('#')
if scalefactor < 0 or len(hexstr) != 6:
return hexstr
r, g, b = int(hexstr[:2], 16), int(hexstr[2:4], 16), int(hexstr[4:], 16)
r = clamp(r * scalefactor)
g = clamp(g * scalefactor)
b = clamp(b * scalefactor)
return "#%02x%02x%02x" % (r, g, b)
range_integral = 0.0194213341446518*(2.56+0.301)
limited_range = 0.004105169206845768*(0.5+0.301)
folders = ["-2.500","-3.000","-3.500","-4.000","-4.500","-5.000","-5.500","-6.000"]
Zvals = [-2.50,-3.00,-3.50,-4.00,-4.50,-5.00,-5.50,-6.00]
BHNS = np.zeros(len(folders))
BHNS_disrupted = np.zeros(len(folders))
BHNS_merger = np.zeros(len(folders))
BB = np.zeros(len(folders))
BHBH = np.zeros(len(folders))
BHBH_disrupted = np.zeros(len(folders))
BHBH_merger = np.zeros(len(folders))
PISN = np.zeros(len(folders))
for i, folder in enumerate(folders):
file_name = "kick_"+folder+".dat"
data = np.genfromtxt(file_name, dtype=None, names=True, skip_header=1)
print file_name
for datum in data:
if float(datum[2])<0.49:
continue
logM = float(datum[0])
dlm = 0.05
Period = float(datum[2])
dlp = np.log10(Period+0.025) - np.log10(Period-0.025)
#dlp = ((Period+0.025)*np.log(Period+0.025)-(Period+0.025))/np.log(10) - \
# ((Period-0.025)*np.log(Period-0.025)-(Period-0.025))/np.log(10)
dq = 0.05
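        # Interpretation (an assumption, not stated in the original code):
        # weight each grid point by a Salpeter-like IMF, dN/dlogM ~ M**-1.35,
        # with flat distributions in log-period and mass ratio.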
extra_weight = np.power(np.power(10,logM),-1.35)*dlm*dlp*dq
if datum[6] > 0:
BB[i] += extra_weight
elif datum[4] < 12:
BHNS[i] += (1-datum[8]-datum[7])*extra_weight
BHNS_disrupted[i] += datum[8]*extra_weight
BHNS_merger[i] += datum[7]*extra_weight
elif datum[4] < 60:
BHBH[i] += (1-datum[10]-datum[9])*extra_weight
BHBH_disrupted[i] += datum[10]*extra_weight
BHBH_merger[i] += datum[9]*extra_weight
else:
PISN[i] += extra_weight
sum_remnants = (BB[i]\
+BHNS[i]+BHNS_disrupted[i]+BHNS_merger[i]\
+BHBH[i]+BHBH_disrupted[i]+BHBH_merger[i]\
+PISN[i])
BHNS[i] /= sum_remnants
BHNS_disrupted[i] /= sum_remnants
BHNS_merger[i] /= sum_remnants
BB[i] /= sum_remnants
BHBH[i] /= sum_remnants
BHBH_disrupted[i] /= sum_remnants
BHBH_merger[i] /= sum_remnants
PISN[i] /= sum_remnants
print "BB,BHNS,BHNSd,BHNSm,BHBH,BHBHd,BHBHm,PISN"
for i in range(len(BB)):
print BB[i], BHNS[i],BHNS_disrupted[i],BHNS_merger[i],BHBH[i],BHBH_disrupted[i],BHBH_merger[i],PISN[i]
plt.plot(Zvals,np.log10(BB+1e-10),color="0.5",label="Case MBB")
plt.plot(Zvals,np.log10(BHNS+1e-10),color=hexcols[1],label="wide BH-NS")
plt.plot(Zvals,np.log10(BHNS_merger+1e-10), color=hexcols[1], ls="--",label="merging BH-NS")
plt.plot(Zvals,np.log10(BHNS_disrupted+1e-10), color=hexcols[1], ls =":",label="disrupted BH-NS")
plt.plot(Zvals,np.log10(BHBH+1e-10), color=hexcols[3],label="wide BH-BH")
plt.plot(Zvals,np.log10(BHBH_merger+1e-10), color=hexcols[3],ls="--",label="merging BH-BH")
plt.plot(Zvals,np.log10(BHBH_disrupted+1e-10), color=hexcols[3],ls=":",label="disrupted BH-BH")
plt.plot(Zvals,np.log10(PISN+1e-10), color = hexcols[5],label="PISN")
plt.legend(loc=3)
plt.gca().set_ylim([-3.5,0])
plt.savefig("post_kick.pdf", dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False)
# import colormaps as cmaps
# plt.register_cmap(name='viridis', cmap=cmaps.viridis)
# plt.set_cmap(cmaps.viridis)
#
# #gs = grd.GridSpec(2, 2, height_ratios=[1,2], width_ratios=[20,1], wspace=0.1, hspace=0.1)
# gs = grd.GridSpec(1, 1)
#
# axarr = [plt.subplot(gs[0]),plt.subplot(gs[2]),plt.subplot(gs[3])]
#
# axarr[0].set_ylabel("relative number")
# axarr[0].xaxis.set_major_locator(plt.NullLocator())
# n, bins, patches = axarr[0].hist( [bh_masses_A, bh_masses_B], weights=[bh_weights_A,bh_weights_B],\
# histtype='barstacked', stacked = True, color =[hexcols[1],hexcols[4]], label=["Case A","Case B"],\
# rwidth = 1, bins=np.arange(1.4,2.7,0.05),lw=0)
# legend = axarr[0].legend(loc="best",title="$\log\;Z="+folder+"$\n"+"{0:.3}".format(sum_weights*0.01)+" per unit SFR")
# axarr[0].set_xlim([1.4,2.6])
# axarr[0].set_ylim([0,0.4])
# plt.setp(legend.get_title(),fontsize='7')
#
# axarr[1].set_ylabel("$\log$ age [yr]")
# axarr[1].set_xlabel("$\log\;M_{\mathrm{BH}}\;\mathrm{[M_\odot]}$")
# H, xedges, yedges, img = axarr[1].hist2d( hist_data_bhmass, hist_data_times, weights=hist_data_weights,\
# bins=[np.arange(1.4,2.7,0.05),np.arange(6.3,8.15,0.1)], normed = True)
# axarr[1].set_xlim([1.4,2.6])
#
# norm = mpl.colors.Normalize(vmin=0, vmax=1)
# mpl.colorbar.ColorbarBase(axarr[2], norm=norm, orientation='vertical')
#
# plt.savefig("plots/mt_times_"+folder+".pdf", dpi=None, facecolor='w', edgecolor='w',
# orientation='portrait', papertype=None, format=None,
# transparent=False)
|
gpl-3.0
|
roderickmackenzie/opvdm
|
gui/band_graph.py
|
1
|
7135
|
# Organic Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for organic solar cells.
# Copyright (C) 2012 Roderick C. I. MacKenzie
#
# [email protected]
# www.opvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import pygtk
pygtk.require('2.0')
import gtk
import sys
import os
import shutil
from numpy import *
from inp import inp_update_token_value
from inp import inp_get_token_value
from numpy import arange, sin, pi
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
import gobject
import os, fnmatch
from plot_gen import plot_gen
import zipfile
import glob
from scan_item import scan_item_add
from util import lines_to_xyz
from tab import tab_class
from win_lin import running_on_linux
from photon_dist import photon_dist_class
from plot_widget import plot_widget
from plot_state import plot_state
from plot_io import plot_load_info
from progress import progress_class
from cal_path import get_materials_path
from cal_path import get_light_dll_path
from cal_path import get_exe_command
from inp import inp_load_file
from epitaxy import epitaxy_get_layers
from epitaxy import epitaxy_get_mat_file
from epitaxy import epitaxy_get_electrical_layer
from epitaxy import epitaxy_get_width
from epitaxy import epitaxy_get_name
from inp import inp_search_token_value
from matplotlib.figure import Figure
from plot_io import get_plot_file_info
class band_graph(gtk.VBox):
def init(self):
toolbar = gtk.Toolbar()
toolbar.set_style(gtk.TOOLBAR_ICONS)
toolbar.set_size_request(-1, 50)
self.pack_start(toolbar, False, False, 0)
tool_bar_pos=0
save = gtk.ToolButton(gtk.STOCK_SAVE)
save.connect("clicked", self.callback_save_image)
toolbar.insert(save, tool_bar_pos)
toolbar.show_all()
tool_bar_pos=tool_bar_pos+1
self.my_figure=Figure(figsize=(5,4), dpi=100)
self.canvas = FigureCanvas(self.my_figure) # a gtk.DrawingArea
self.canvas.figure.patch.set_facecolor('white')
self.canvas.set_size_request(600, 400)
self.canvas.show()
self.pack_start(self.canvas, False, False, 0)
self.canvas.connect('key_press_event', self.on_key_press_event)
self.show_all()
def on_key_press_event(self,widget, event):
keyname = gtk.gdk.keyval_name(event.keyval)
if keyname == "c":
if event.state == gtk.gdk.CONTROL_MASK:
self.do_clip()
self.canvas.draw()
def do_clip(self):
print "doing clip"
snap = self.my_figure.canvas.get_snapshot()
pixbuf = gtk.gdk.pixbuf_get_from_drawable(None, snap, snap.get_colormap(),0,0,0,0,snap.get_size()[0], snap.get_size()[1])
clip = gtk.Clipboard()
clip.set_image(pixbuf)
def callback_save_image(self, widget):
dialog = gtk.FileChooserDialog("Save plot",
None,
gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
dialog.set_action(gtk.FILE_CHOOSER_ACTION_CREATE_FOLDER)
filter = gtk.FileFilter()
filter.set_name("png")
filter.add_pattern("*.png")
dialog.add_filter(filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.my_figure.savefig(dialog.get_filename())
elif response == gtk.RESPONSE_CANCEL:
print 'Closed'
dialog.destroy()
def set_data_file(self,file):
self.optical_mode_file=os.path.join(os.getcwd(),"light_dump",file)
def draw_graph(self):
self.layer_end=[]
self.layer_name=[]
n=0
self.my_figure.clf()
ax1 = self.my_figure.add_subplot(111)
ax2 = ax1.twinx()
x_pos=0.0
layer=0
color =['r','g','b','y','o','r','g','b','y','o']
start=0.0
for i in range(0,epitaxy_get_layers()):
if epitaxy_get_electrical_layer(i)=="none":
start=start-epitaxy_get_width(i)
else:
break
print "START=",start
start=start*1e9
x_pos=start
for i in range(0,epitaxy_get_layers()):
label=epitaxy_get_mat_file(i)
layer_ticknes=epitaxy_get_width(i)
layer_material=epitaxy_get_mat_file(i)
delta=float(layer_ticknes)*1e9
if epitaxy_get_electrical_layer(i)=="none":
mat_file=os.path.join(os.getcwd(),'materials',layer_material,'mat.inp')
myfile = open(mat_file)
self.mat_file_lines = myfile.readlines()
myfile.close()
for ii in range(0, len(self.mat_file_lines)):
self.mat_file_lines[ii]=self.mat_file_lines[ii].rstrip()
lumo=-float(self.mat_file_lines[1])
Eg=float(self.mat_file_lines[3])
else:
lines=[]
if inp_load_file(lines,epitaxy_get_electrical_layer(i)+".inp")==True:
lumo=-float(inp_search_token_value(lines, "#Xi"))
Eg=float(inp_search_token_value(lines, "#Eg"))
x = [x_pos,x_pos+delta,x_pos+delta,x_pos]
lumo_delta=lumo-0.1
homo=lumo-Eg
homo_delta=homo-0.1
if Eg==0.0:
lumo_delta=-7.0
homo=0.0
lumo_shape = [lumo,lumo,lumo_delta,lumo_delta]
x_pos=x_pos+delta
self.layer_end.append(x_pos)
self.layer_name.append(layer_material)
ax2.fill(x,lumo_shape, color[layer],alpha=0.4)
ax2.text(x_pos-delta/1.5, lumo-0.4, epitaxy_get_name(i))
if homo!=0.0:
homo_shape = [homo,homo,homo_delta,homo_delta]
ax2.fill(x,homo_shape, color[layer],alpha=0.4)
layer=layer+1
n=n+1
state=plot_state()
get_plot_file_info(state,self.optical_mode_file)
#summary="<big><b>"+self.store[path[0]][0]+"</b></big>\n"+"\ntitle: "+state.title+"\nx axis: "+state.x_label+" ("+latex_to_pygtk_subscript(state.x_units)+")\ny axis: "++" ("+latex_to_pygtk_subscript(state.y_units)+")\n\n<big><b>Double click to open</b></big>"
print "ROD!!!!",state.y_label,self.optical_mode_file
ax1.set_ylabel(state.y_label)
ax1.set_xlabel('Position (nm)')
ax2.set_ylabel('Energy (eV)')
ax2.set_xlim([start, x_pos])
#ax2.axis(max=)#autoscale(enable=True, axis='x', tight=None)
loaded=False
if os.path.isfile("light_dump.zip"):
zf = zipfile.ZipFile("light_dump.zip", 'r')
lines = zf.read(self.optical_mode_file).split("\n")
zf.close()
loaded=True
elif os.path.isfile(self.optical_mode_file):
print "I want to load",self.optical_mode_file
f = open(self.optical_mode_file)
lines = f.readlines()
f.close()
loaded=True
if loaded==True:
xx=[]
yy=[]
zz=[]
lines_to_xyz(xx,yy,zz,lines)
t = asarray(xx)
s = asarray(yy)
t=t*1e9
ax1.plot(t,s, 'black', linewidth=3 ,alpha=0.5)
self.my_figure.tight_layout()
|
gpl-2.0
|
Sixshaman/networkx
|
examples/drawing/unix_email.py
|
16
|
2665
|
#!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of a MultiDiGraph to hold edge data
of arbitrary Python objects (in this case, email messages).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, e.g.
python unix_email.py /var/spool/mail/username
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2005-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.Errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
|
bsd-3-clause
|
alexsavio/scikit-learn
|
sklearn/cluster/birch.py
|
22
|
22733
|
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils.extmath import row_norms, safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from .hierarchical import AgglomerativeClustering
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in xrange(n_samples):
row = np.zeros(X.shape[1])
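        # In CSR layout, indptr[i]:indptr[i + 1] delimits the stored entries of
        # row i; scatter them into the dense row buffer.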
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_node2 = _CFNode(
threshold, branching_factor, is_leaf=node.is_leaf,
n_features=node.n_features)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(
dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[[farthest_idx]]
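    # node1_dist and node2_dist are the rows of the distance matrix for the two
    # seed subclusters, so node1_closer flags the subclusters nearer to the
    # first seed than to the second.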
node1_closer = node1_dist < node2_dist
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode(object):
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : array-like
list of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
prev_leaf. Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.
init_centroids_ : ndarray, shape (branching_factor + 1, n_features)
manipulate ``init_centroids_`` throughout rather than centroids_ since
the centroids are just a view of the ``init_centroids_`` .
init_sq_norm_ : ndarray, shape (branching_factor + 1,)
manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
centroids_ : ndarray
view of ``init_centroids_``.
squared_norm_ : ndarray
view of ``init_sq_norm_``.
"""
def __init__(self, threshold, branching_factor, is_leaf, n_features):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
self.init_sq_norm_ = np.zeros((branching_factor + 1))
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
# Keep centroids and squared norm as views. In this way
# if we change init_centroids and init_sq_norm_, it is
# sufficient,
self.centroids_ = self.init_centroids_[:n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]
def update_split_subclusters(self, subcluster,
new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.
dist_matrix += self.squared_norm_
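        # dist_matrix now holds ||c_k||^2 - 2 <c_k, c_new>, i.e. the squared
        # distance to each stored centroid up to the constant ||c_new||^2, so
        # argmin selects the closest subcluster.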
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(
subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = \
self.subclusters_[closest_index].centroid_
self.init_sq_norm_[closest_index] = \
self.subclusters_[closest_index].sq_norm_
return False
# things not too good. we need to redistribute the subclusters in
# our child node, and add a new subcluster in the parent
# subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_, threshold, branching_factor)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(
subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = \
closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = \
closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
# We do not have enough space nor is it closer to an
# other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster(object):
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray, shape (n_features,), optional
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_norm = np.dot(new_centroid, new_centroid)
dot_product = (-2 * new_n) * new_norm
sq_radius = (new_ss + dot_product) / new_n + new_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
return sqrt(
((self.squared_sum_ + dot_product) / self.n_samples_) +
self.sq_norm_)
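# Note on the algebra used in ``merge_subcluster`` and ``radius`` above: with
# n samples, linear sum LS, squared sum SS and centroid c = LS / n,
#     R**2 = mean_i ||x_i - c||**2 = SS / n - ||c||**2,
# which is what both expressions compute (``dot_product`` carries the cross
# term). A minimal sanity check of the identity, written as a hypothetical
# helper that is not part of the original module:
def _check_radius_identity(rng=None):
    rng = np.random.RandomState(0) if rng is None else rng
    X = rng.rand(5, 3)
    subcluster = _CFSubcluster(linear_sum=X[0].copy())
    for row in X[1:]:
        subcluster.update(_CFSubcluster(linear_sum=row.copy()))
    # brute-force root-mean-square distance of the samples to their centroid
    brute_force = np.sqrt(((X - X.mean(axis=0)) ** 2).sum(axis=1).mean())
    assert np.isclose(subcluster.radius, brute_force)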
class Birch(BaseEstimator, TransformerMixin, ClusterMixin):
"""Implements the Birch clustering algorithm.
Every new sample is inserted into the root of the Clustering Feature
    Tree. It is then merged with the subcluster that has the centroid
    closest to the new sample. This is done recursively until it reaches
    the leaf subcluster with the closest centroid.
Read more in the :ref:`User Guide <birch>`.
Parameters
----------
threshold : float, default 0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster must be less than the threshold; otherwise a new
        subcluster is started.
branching_factor : int, default 50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor, the
        node has to be split. The corresponding parent also has to be split,
        and if the number of subclusters in the parent exceeds the branching
        factor, it is split recursively.
n_clusters : int, instance of sklearn.cluster model, default 3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples. If None, this final
clustering step is not performed and the subclusters are returned
as they are. If a model is provided, the model is fit treating
the subclusters as new samples and the initial data is mapped to the
label of the closest subcluster. If an int is provided, the model
fit is AgglomerativeClustering with n_clusters set to the int.
compute_labels : bool, default True
Whether or not to compute labels for each fit.
copy : bool, default True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray,
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray,
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray, shape (n_samples,)
Array of labels assigned to the input data.
        If partial_fit is used instead of fit, they are assigned to the
        last batch of data.
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5,
... compute_labels=True)
>>> brc.fit(X)
Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,
threshold=0.5)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
References
----------
* Tian Zhang, Raghu Ramakrishnan, Maron Livny
BIRCH: An efficient data clustering method for large databases.
http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
"""
def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3,
compute_labels=True, copy=True):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
"""
self.fit_, self.partial_fit_ = True, False
return self._fit(X)
def _fit(self, X):
X = check_array(X, accept_sparse='csr', copy=self.copy)
threshold = self.threshold
branching_factor = self.branching_factor
if branching_factor <= 1:
raise ValueError("Branching_factor should be greater than one.")
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
partial_fit = getattr(self, 'partial_fit_')
has_root = getattr(self, 'root_', None)
if getattr(self, 'fit_') or (partial_fit and not has_root):
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True,
n_features=n_features)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(threshold, branching_factor,
is_leaf=True, n_features=n_features)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince us to use Cython here.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor)
del self.root_
self.root_ = _CFNode(threshold, branching_factor,
is_leaf=False,
n_features=n_features)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([
leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
        leaves : array-like
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features), None
Input data. If X is not provided, only the global clustering
step is done.
"""
self.partial_fit_, self.fit_ = True, False
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
self._check_fit(X)
return self._fit(X)
def _check_fit(self, X):
is_fitted = hasattr(self, 'subcluster_centers_')
# Called by partial_fit, before fitting.
has_partial_fit = hasattr(self, 'partial_fit_')
# Should raise an error if one does not fit before predicting.
if not (is_fitted or has_partial_fit):
raise NotFittedError("Fit training data before predicting")
if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]:
raise ValueError(
"Training data and predicted data do "
"not have same number of features.")
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : ndarray, shape (n_samples,)
Labelled data.
"""
X = check_array(X, accept_sparse='csr')
self._check_fit(X)
reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
reduced_distance *= -2
reduced_distance += self._subcluster_norms
return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
def transform(self, X, y=None):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self, 'subcluster_centers_')
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, int):
clusterer = AgglomerativeClustering(
n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
elif (clusterer is not None and not
hasattr(clusterer, 'fit_predict')):
raise ValueError("n_clusters should be an instance of "
"ClusterMixin or an int")
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(
self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by Birch is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters))
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(
self.subcluster_centers_)
if compute_labels:
self.labels_ = self.predict(X)
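# A minimal usage sketch (not part of the original module), assuming data
# arrives in chunks: ``partial_fit`` grows the CF tree incrementally and
# ``predict`` then assigns each sample to the closest subcluster.
def _example_streaming_birch():
    rng = np.random.RandomState(42)
    chunks = [rng.rand(100, 2) for _ in range(5)]
    brc = Birch(n_clusters=None, threshold=0.2)
    for chunk in chunks:
        brc.partial_fit(chunk)
    return brc.predict(np.vstack(chunks))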
|
bsd-3-clause
|
ayshih/foxsi-smex
|
pyfoxsi/src/pyfoxsi/response/response.py
|
4
|
8272
|
"""
Response is a module to handle the response of the FOXSI telescopes
"""
from __future__ import absolute_import
import pandas as pd
import numpy as np
import warnings
import os
import matplotlib.pyplot as plt
import astropy.units as u
from scipy import interpolate
import pyfoxsi
import h5py
__all__ = ['Response', 'Material']
class Response(object):
"""An object which provides the FOXSI telescope response
Parameters
----------
shutter_state : int, default 0
A number representing the state of the shutter (0 - no shutter, 1 - thin shutter, 2 - thick shutter)
configuration : int, default 1
Choose the optics configuration
1 : 15 meters
2 : 10 meters 3 modules
3 : 10 meters 2 modules
Examples
--------
>>> from pyfoxsi.response import Response
>>> resp = Response()
>>> resp1 = Response(shutter_state=1)
"""
def __init__(self, shutter_state=0, configuration=1):
path = os.path.dirname(pyfoxsi.__file__)
for i in np.arange(3):
path = os.path.dirname(path)
path = os.path.join(path, 'data/')
filename = 'effective_area_per_module.csv'
effarea_file = os.path.join(path, filename)
optics_effective_area = pd.read_csv(effarea_file, index_col=0, skiprows=4)
optics_effective_area = optics_effective_area[optics_effective_area.columns[configuration-1]]
if configuration == 1:
pyfoxsi.focal_length = 15 * u.m
pyfoxsi.number_of_telescopes = 3
elif configuration == 2:
pyfoxsi.focal_length = 10 * u.m
pyfoxsi.number_of_telescopes = 3
elif configuration == 3:
pyfoxsi.focal_length = 10 * u.m
pyfoxsi.number_of_telescopes = 2
self.optics_effective_area = pd.DataFrame(dict(total=optics_effective_area.copy(),
module=optics_effective_area.copy()))
# find what shells are missing
#shell_numbers = np.array(self._eff_area_per_shell.columns, np.uint)
#missing_shells = np.setdiff1d(shell_numbers, pyfoxsi.shell_ids)
# remove the missing shells
self.__number_of_telescopes = 1
#for missing_shell in missing_shells:
# self._eff_area_per_shell.drop(str(missing_shell), 1, inplace=True)
# now add the effective area of all of the shells together
#self.optics_effective_area = pd.DataFrame({'module': self._eff_area_per_shell.sum(axis=1), 'total': self._eff_area_per_shell.sum(axis=1)})
self.effective_area = pd.DataFrame(dict(total=self.optics_effective_area['total'].copy(), module=self.optics_effective_area['module'].copy()))
self.number_of_telescopes = pyfoxsi.number_of_telescopes
self._set_default_optical_path()
if shutter_state > 0:
self.__optical_path.append(Material('al', pyfoxsi.shutters_thickness[shutter_state]))
self.__shutter_state = shutter_state
self._add_optical_path_to_effective_area()
def plot(self, axes=None):
"""Plot the effective area"""
if axes is None:
axes = plt.gca()
a = self.effective_area.plot(axes=axes)
axes.set_title(pyfoxsi.mission_title + ' ' + str(self.number_of_telescopes) + 'x ' + 'Shutter State ' + str(self.shutter_state))
axes.set_ylabel('Effective area [cm$^2$]')
axes.set_xlabel('Energy [keV]')
def _set_default_optical_path(self):
self.__optical_path = [Material('mylar', pyfoxsi.blanket_thickness),
Material(pyfoxsi.detector_material, pyfoxsi.detector_thickness)]
@property
def number_of_telescopes(self):
"""The total number of telescope modules"""
return self.__number_of_telescopes
@number_of_telescopes.setter
def number_of_telescopes(self, x):
self.optics_effective_area['total'] = self.optics_effective_area['total'] / self.__number_of_telescopes * x
self.__number_of_telescopes = x
@property
def optical_path(self):
"""The materials in the optical path including the detector"""
return self.__optical_path
@optical_path.setter
def optical_path(self, x):
        self.__optical_path = x
self._add_optical_path_to_effective_area()
@property
def shutter_state(self):
"""The shutter state, allowed values are 0, 1, 2"""
return self.__shutter_state
@shutter_state.setter
def shutter_state(self, x):
raise AttributeError('Cannot change shutter state. Create new object with desired shutter state')
def _add_optical_path_to_effective_area(self):
"""Add the effect of the optical path to the effective area"""
energies = np.array(self.optics_effective_area.index)
# Remove 10% of flux due to spiders
factor = np.ones_like(energies) * 0.9
# Apply all of the materials in the optical path to factor
for material in self.optical_path:
print(material.name)
if material.name == pyfoxsi.detector_material:
# if it is the detector than we want the absorption
factor *= material.absorption(energies)
else:
factor *= material.transmission(energies)
self.effective_area['factor'] = factor
self.effective_area['total'] = factor * self.optics_effective_area['total']
self.effective_area['module'] = factor * self.optics_effective_area['module']
class Material(object):
"""An object which provides the optical properties of a material in x-rays
Parameters
----------
material : str
A string representing a material (e.g. cdte, be, mylar, si)
thickness : `astropy.units.Quantity`
The thickness of the material in the optical path.
Examples
--------
>>> from pyfoxsi.response import Material
>>> import astropy.units as u
>>> detector = Material('cdte', 500 * u.um)
>>> thermal_blankets = Material('mylar', 0.5 * u.mm)
"""
def __init__(self, material, thickness):
self.name = material
self.thickness = thickness
path = os.path.dirname(pyfoxsi.__file__)
for i in np.arange(3):
path = os.path.dirname(path)
path = os.path.join(path, 'data/')
filename = 'mass_attenuation_coefficient.hdf5'
data_file = os.path.join(path, filename)
h = h5py.File(data_file, 'r')
data = h[self.name]
self._source_data = data
self.density = u.Quantity(self._source_data.attrs['density'], self._source_data.attrs['density unit'])
data_energy_kev = np.log10(self._source_data[0,:] * 1000)
data_attenuation_coeff = np.log10(self._source_data[1,:])
self._f = interpolate.interp1d(data_energy_kev, data_attenuation_coeff, bounds_error=False, fill_value=0.0)
self._mass_attenuation_coefficient_func = lambda x: 10 ** self._f(np.log10(x))
def __repr__(self):
"""Returns a human-readable representation."""
return '<Material ' + str(self.name) + ' ' + str(self.thickness) + '>'
def transmission(self, energy):
"""Provide the transmission fraction (0 to 1).
Parameters
----------
energy : `astropy.units.Quantity`
An array of energies in keV
"""
coefficients = self._mass_attenuation_coefficient_func(energy) * u.cm ** 2 / u.gram
transmission = np.exp(- coefficients * self.density * self.thickness)
return transmission
def absorption(self, energy):
"""Provides the absorption fraction (0 to 1).
Parameters
----------
energy : `astropy.units.Quantity`
An array of energies in keV.
"""
return 1 - self.transmission(energy)
def plot(self, axes=None):
if axes is None:
axes = plt.gca()
energies = np.arange(1, 60)
axes.plot(energies, self.transmission(energies), label='Transmission')
axes.plot(energies, self.absorption(energies), label='Absorption')
axes.set_ylim(0, 1.2)
axes.legend()
axes.set_title(self.name + ' ' + str(self.thickness))
axes.set_xlabel('Energy [keV]')
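# A minimal usage sketch (not part of the original module), assuming the
# bundled effective-area and attenuation data files are available: compare
# the effective area with and without the thin shutter on one set of axes.
def _example_compare_shutter_states():
    ax = plt.gca()
    Response(shutter_state=0).plot(axes=ax)
    Response(shutter_state=1).plot(axes=ax)
    plt.show()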
|
mit
|
lin-credible/scikit-learn
|
sklearn/utils/fixes.py
|
133
|
12882
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
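# For example, _parse_version('1.8.1') == (1, 8, 1), so the checks below can
# use plain tuple comparisons such as ``np_version < (1, 8)``.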
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
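# A minimal usage sketch (not part of this module): the backports are meant to
# be imported in place of the numpy/scipy originals, e.g.
#
#     from sklearn.utils.fixes import bincount
#     counts = bincount(np.array([], dtype=np.intp), minlength=3)
#     # -> array([0, 0, 0]) even on numpy versions whose np.bincount
#     #    rejects empty input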
|
bsd-3-clause
|
frank-tancf/scikit-learn
|
sklearn/datasets/mlcomp.py
|
289
|
3855
|
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
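# A minimal usage sketch (not part of this module), assuming the MLComp
# "20news-18828" archive has been unzipped under $MLCOMP_DATASETS_HOME:
#
#     news_train = load_mlcomp('20news-18828', set_='train')
#     print(news_train.target_names)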
|
bsd-3-clause
|
waltervh/BornAgain
|
Examples/Demos/simul_demo_lattice1.py
|
2
|
2163
|
'''
Simulation demo: 2d lattice structure of particles
'''
import numpy
import matplotlib
from matplotlib import pyplot as plt
import math
from bornagain import *
M_PI = numpy.pi
# ----------------------------------
# describe sample and run simulation
# ----------------------------------
def RunSimulation():
# defining materials
mAmbience = HomogeneousMaterial("Air", 0.0, 0.0 )
mSubstrate = HomogeneousMaterial("Substrate", 6e-6, 2e-8 )
mParticle = HomogeneousMaterial("Particle", 6e-4, 2e-8 )
# particle
cylinder_ff = FormFactorCylinder(5*nanometer, 5*nanometer)
cylinder = Particle(mParticle, cylinder_ff.clone())
position = kvector_t(0.0, 0.0, 0.0)
cylinder.setPosition(position)
particle_layout = ParticleLayout()
particle_layout.addParticle(cylinder, 1.0)
# interference function
interference = InterferenceFunction2DLattice.createSquare(10.0*nanometer)
pdf = FTDecayFunction2DCauchy(300.0*nanometer/2.0/M_PI, 100.0*nanometer/2.0/M_PI)
interference.setDecayFunction(pdf)
particle_layout.setInterferenceFunction(interference)
# top air layer
air_layer = Layer(mAmbience)
air_layer.addLayout(particle_layout)
# substrate layer
substrate_layer = Layer(mSubstrate, 0)
# multilayer
multi_layer = MultiLayer()
multi_layer.addLayer(air_layer)
multi_layer.addLayer(substrate_layer)
# build and run experiment
simulation = GISASSimulation()
simulation.setDetectorParameters(100, -2.0*degree, 2.0*degree, 100, 0.0*degree, 4.0*degree)
simulation.setBeamParameters(1.0*angstrom, 0.2*degree, 0.0*degree)
# run simulation
simulation.setSample(multi_layer)
simulation.runSimulation()
return simulation.result().array()
#-------------------------------------------------------------
# main()
#-------------------------------------------------------------
if __name__ == '__main__':
result = RunSimulation()
im = plt.imshow(result+1,
norm=matplotlib.colors.LogNorm(),
extent=[-2.0, 2.0, 0, 4.0])
plt.colorbar(im)
plt.xlabel(r'$\phi_f$', fontsize=20)
plt.ylabel(r'$\alpha_f$', fontsize=20)
plt.show()
|
gpl-3.0
|
kdebrab/pandas
|
pandas/tests/generic/test_series.py
|
4
|
7558
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from operator import methodcaller
import pytest
import numpy as np
import pandas as pd
from distutils.version import LooseVersion
from pandas import Series, date_range, MultiIndex
from pandas.compat import range
from pandas.util.testing import (assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from .test_generic import Generic
try:
import xarray
_XARRAY_INSTALLED = True
except ImportError:
_XARRAY_INSTALLED = False
class TestSeries(Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setup_method(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_set_axis_name(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
funcs = ['rename_axis', '_set_axis_name']
name = 'foo'
for func in funcs:
result = methodcaller(func, name)(s)
assert s.index.name is None
assert result.index.name == name
def test_set_axis_name_mi(self):
s = Series([11, 21, 31], index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]],
names=['l1', 'l2'])
)
funcs = ['rename_axis', '_set_axis_name']
for func in funcs:
result = methodcaller(func, ['L1', 'L2'])(s)
assert s.index.name is None
assert s.index.names == ['l1', 'l2']
assert result.index.name is None
            assert result.index.names == ['L1', 'L2']
def test_set_axis_name_raises(self):
s = pd.Series([1])
with pytest.raises(ValueError):
s._set_axis_name(name='a', axis=1)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
assert s.bool()
s = Series([False])
assert not s.bool()
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
pytest.raises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
pytest.raises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
pytest.raises(ValueError, lambda: bool(s))
pytest.raises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
pytest.raises(ValueError, lambda: bool(s))
pytest.raises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
assert result.filename == 'foo+bar'
assert result.name is None
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
@pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and
LooseVersion(xarray.__version__) <
LooseVersion('0.10.0'),
reason='xarray >= 0.10.0 required')
@pytest.mark.parametrize(
"index",
['FloatIndex', 'IntIndex',
'StringIndex', 'UnicodeIndex',
'DateIndex', 'PeriodIndex',
'TimedeltaIndex', 'CategoricalIndex'])
def test_to_xarray_index_types(self, index):
from xarray import DataArray
index = getattr(tm, 'make{}'.format(index))
s = Series(range(6), index=index(6))
s.index.name = 'foo'
result = s.to_xarray()
repr(result)
assert len(result) == 6
assert len(result.coords) == 1
assert_almost_equal(list(result.coords.keys()), ['foo'])
assert isinstance(result, DataArray)
# idempotency
assert_series_equal(result.to_series(), s,
check_index_type=False,
check_categorical=True)
@td.skip_if_no('xarray', min_version='0.7.0')
def test_to_xarray(self):
from xarray import DataArray
s = Series([])
s.index.name = 'foo'
result = s.to_xarray()
assert len(result) == 0
assert len(result.coords) == 1
assert_almost_equal(list(result.coords.keys()), ['foo'])
assert isinstance(result, DataArray)
s = Series(range(6))
s.index.name = 'foo'
s.index = pd.MultiIndex.from_product([['a', 'b'], range(3)],
names=['one', 'two'])
result = s.to_xarray()
assert len(result) == 2
assert_almost_equal(list(result.coords.keys()), ['one', 'two'])
assert isinstance(result, DataArray)
assert_series_equal(result.to_series(), s)
def test_valid_deprecated(self):
# GH18800
with tm.assert_produces_warning(FutureWarning):
pd.Series([]).valid()
|
bsd-3-clause
|
payashim/python_visual_recognition_tutorials
|
01_pedestrian_detector/notebooks/test_before.py
|
1
|
2172
|
import numpy as np
from skimage import data
from skimage.feature import hog
from skimage.transform import resize
import matplotlib.pyplot as plt
from sklearn.externals import joblib
import cv2
from sklearn.cluster import MeanShift, estimate_bandwidth
#load person detector(SVM)
print 'start loading SVM.'
detector = joblib.load('person_detector.pkl')
print 'finish loading SVM'
#person window size
PERSON_WIDTH = 64
PERSON_HEIGHT = 128
test_img_path = './INRIAPerson/Test/pos/person_198.png'
test_img = data.imread(test_img_path,as_grey=True)
#image size
img_w = 320
img_h = 240
img_size = (img_h,img_w)
#step size of sliding window search
step_w = 5
step_h = 5
test_img =resize(test_img, img_size)
likelihood_map = np.zeros(img_size,dtype=np.uint8)
X =[]
for x in range(0,img_w-step_w-PERSON_WIDTH,step_w):
for y in range(0,img_h-step_h-PERSON_HEIGHT,step_h):
window = test_img[y:y+PERSON_HEIGHT,x:x+PERSON_WIDTH]
fd = hog(window, orientations=9, pixels_per_cell=(8,8),cells_per_block=(2,2), visualise=False)
estimated_class = detector.predict(fd)
if estimated_class == 1:
likelihood_map[y:y+PERSON_HEIGHT,x:x+PERSON_WIDTH] +=1
headCenter = [x,y]
X.append(headCenter)
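# The window positions collected above are grouped with MeanShift below; this
# acts as a simple non-maximum suppression step, so overlapping detections of
# the same person collapse to a single cluster center.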
if len(X) > 0:
X = np.array(X)
bandwidth = estimate_bandwidth(X, quantile=0.3)
print 'automatically estimated bandwidth is ' + str(bandwidth)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
cluster_centers = ms.cluster_centers_
print cluster_centers
posColor = (0,127,0)
showImg = cv2.imread(test_img_path)
showImgBGR= cv2.cvtColor(showImg,cv2.COLOR_BGR2RGB)
showImgBGR = resize(showImgBGR, img_size)
showImg = resize(showImg, img_size)
for cluster_center in cluster_centers:
x = int(cluster_center[0])
y = int(cluster_center[1])
headStart= (x,y)
headEnd= (x+64,y+128)
cv2.rectangle(showImgBGR,headStart,headEnd,posColor,1)
plt.figure(figsize=(16, 8))
plt.subplot(121).set_axis_off()
plt.imshow(showImgBGR)
plt.title('Detection(s) Result')
plt.subplot(122).set_axis_off()
plt.imshow(likelihood_map, cmap=plt.cm.gray)
plt.title('Likelihood Map')
plt.show()
|
mit
|
mbayon/TFG-MachineLearning
|
vbig/lib/python2.7/site-packages/sklearn/linear_model/tests/test_sgd.py
|
3
|
50983
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import sgd_fast
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
if "tol" not in kwargs:
kwargs["tol"] = None
if "max_iter" not in kwargs:
kwargs["max_iter"] = 5
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
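    # The running average above implements the plain arithmetic mean of the
    # SGD iterates,
    #     avg_w_i = (i * avg_w_{i-1} + w_i) / (i + 1),
    # which is what estimators trained with ``average=True`` are compared
    # against in the tests below.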
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, max_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, max_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@raises(ValueError)
def test_sgd_bad_alpha_for_optimal_learning_rate(self):
# Check whether expected ValueError on bad alpha, i.e. 0
# since alpha is used to compute the optimal learning rate
self.factory(alpha=0, learning_rate="optimal")
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, max_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_max_iter_param(self):
# Test parameter validity check
self.factory(max_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, max_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, max_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, max_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, max_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01,
max_iter=10, tol=None).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss=loss, alpha=0.01, max_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, max_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, max_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
max_iter=2000, tol=None, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, max_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to class 1
clf = self.factory(alpha=0.1, max_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, max_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, max_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to a non-existing class label.
clf = self.factory(alpha=0.1, max_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, max_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
rng = np.random.RandomState(0)
sample_weights = rng.random_sample(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, max_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, max_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, max_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
f1 = metrics.f1_score(y, clf.predict(X), average='weighted')
assert_almost_equal(f1, 0.96, decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, max_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
f1 = metrics.f1_score(y, clf_balanced.predict(X), average='weighted')
assert_almost_equal(f1, 0.96, decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build a very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(max_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(max_iter=1000, class_weight="balanced",
shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, max_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weight to the class-1 samples
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, max_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def test_partial_fit_multiclass_average(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
clf.partial_fit(X2[third:], Y2[third:])
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, max_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, max_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', max_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
@ignore_warnings
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.predict([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, max_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet', tol=None,
max_iter=6, l1_ratio=0.9999999999,
random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', max_iter=6,
random_state=42, tol=None).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet', tol=None,
max_iter=6, l1_ratio=0.0000000001,
random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', max_iter=6,
random_state=42, tol=None).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', max_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', max_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0, tol=None)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
penalty=penalty, shuffle=False,
tol=None, max_iter=6)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
def test_tol_parameter():
# Test that the tol parameter behaves as expected
X = StandardScaler().fit_transform(iris.data)
y = iris.target == 1
# With tol=None, the number of iterations should be equal to max_iter
max_iter = 42
model_0 = SGDClassifier(tol=None, random_state=0, max_iter=max_iter)
model_0.fit(X, y)
assert_equal(max_iter, model_0.n_iter_)
# If tol is not None, the number of iterations should be less than max_iter
max_iter = 2000
model_1 = SGDClassifier(tol=0, random_state=0, max_iter=max_iter)
model_1.fit(X, y)
assert_greater(max_iter, model_1.n_iter_)
assert_greater(model_1.n_iter_, 5)
# A larger tol should yield a smaller number of iterations
model_2 = SGDClassifier(tol=0.1, random_state=0, max_iter=max_iter)
model_2.fit(X, y)
assert_greater(model_1.n_iter_, model_2.n_iter_)
assert_greater(model_2.n_iter_, 3)
# Strict tolerance and small max_iter should trigger a warning
model_3 = SGDClassifier(max_iter=3, tol=1e-3, random_state=0)
model_3 = assert_warns(ConvergenceWarning, model_3.fit, X, y)
assert_equal(model_3.n_iter_, 3)
def test_future_and_deprecation_warnings():
# Test that warnings are raised. Will be removed in 0.21
def init(max_iter=None, tol=None, n_iter=None):
sgd = SGDClassifier(max_iter=max_iter, tol=tol, n_iter=n_iter)
sgd._validate_params()
# When all default values are used
msg_future = "max_iter and tol parameters have been added in "
assert_warns_message(FutureWarning, msg_future, init)
# When n_iter is specified
msg_deprecation = "n_iter parameter is deprecated"
assert_warns_message(DeprecationWarning, msg_deprecation, init, 6, 0, 5)
# When n_iter=None, and at least one of tol and max_iter is specified
assert_no_warnings(init, 100, None, None)
assert_no_warnings(init, None, 1e-3, None)
assert_no_warnings(init, 100, 1e-3, None)
@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def test_tol_and_max_iter_default_values():
# Test that the default values are correctly changed
est = SGDClassifier()
est._validate_params()
assert_equal(est._tol, None)
assert_equal(est._max_iter, 5)
est = SGDClassifier(n_iter=42)
est._validate_params()
assert_equal(est._tol, None)
assert_equal(est._max_iter, 42)
est = SGDClassifier(tol=1e-2)
est._validate_params()
assert_equal(est._tol, 1e-2)
assert_equal(est._max_iter, 1000)
est = SGDClassifier(max_iter=42)
est._validate_params()
assert_equal(est._tol, None)
assert_equal(est._max_iter, 42)
est = SGDClassifier(max_iter=42, tol=1e-2)
est._validate_params()
assert_equal(est._tol, 1e-2)
assert_equal(est._max_iter, 42)
def _test_gradient_common(loss_function, cases):
# Test gradient of different loss functions
# cases is a list of (p, y, expected)
for p, y, expected in cases:
assert_almost_equal(loss_function.dloss(p, y), expected)
def test_gradient_hinge():
# Test Hinge (hinge / perceptron)
# hinge
loss = sgd_fast.Hinge(1.0)
cases = [
# (p, y, expected)
(1.1, 1.0, 0.0), (-2.0, -1.0, 0.0),
(1.0, 1.0, -1.0), (-1.0, -1.0, 1.0), (0.5, 1.0, -1.0),
(2.0, -1.0, 1.0), (-0.5, -1.0, 1.0), (0.0, 1.0, -1.0)
]
_test_gradient_common(loss, cases)
# perceptron
loss = sgd_fast.Hinge(0.0)
cases = [
# (p, y, expected)
(1.0, 1.0, 0.0), (-0.1, -1.0, 0.0),
(0.0, 1.0, -1.0), (0.0, -1.0, 1.0), (0.5, -1.0, 1.0),
(2.0, -1.0, 1.0), (-0.5, 1.0, -1.0), (-1.0, 1.0, -1.0),
]
_test_gradient_common(loss, cases)
def test_gradient_squared_hinge():
# Test SquaredHinge
loss = sgd_fast.SquaredHinge(1.0)
cases = [
# (p, y, expected)
(1.0, 1.0, 0.0), (-2.0, -1.0, 0.0), (1.0, -1.0, 4.0),
(-1.0, 1.0, -4.0), (0.5, 1.0, -1.0), (0.5, -1.0, 3.0)
]
_test_gradient_common(loss, cases)
def test_gradient_log():
# Test Log (logistic loss)
loss = sgd_fast.Log()
cases = [
# (p, y, expected)
(1.0, 1.0, -1.0 / (np.exp(1.0) + 1.0)),
(1.0, -1.0, 1.0 / (np.exp(-1.0) + 1.0)),
(-1.0, -1.0, 1.0 / (np.exp(1.0) + 1.0)),
(-1.0, 1.0, -1.0 / (np.exp(-1.0) + 1.0)),
(0.0, 1.0, -0.5), (0.0, -1.0, 0.5),
(17.9, -1.0, 1.0), (-17.9, 1.0, -1.0),
]
_test_gradient_common(loss, cases)
assert_almost_equal(loss.dloss(18.1, 1.0), np.exp(-18.1) * -1.0, 16)
assert_almost_equal(loss.dloss(-18.1, -1.0), np.exp(-18.1) * 1.0, 16)
def test_gradient_squared_loss():
# Test SquaredLoss
loss = sgd_fast.SquaredLoss()
cases = [
# (p, y, expected)
(0.0, 0.0, 0.0), (1.0, 1.0, 0.0), (1.0, 0.0, 1.0),
(0.5, -1.0, 1.5), (-2.5, 2.0, -4.5)
]
_test_gradient_common(loss, cases)
def test_gradient_huber():
# Test Huber
loss = sgd_fast.Huber(0.1)
cases = [
# (p, y, expected)
(0.0, 0.0, 0.0), (0.1, 0.0, 0.1), (0.0, 0.1, -0.1),
(3.95, 4.0, -0.05), (5.0, 2.0, 0.1), (-1.0, 5.0, -0.1)
]
_test_gradient_common(loss, cases)
def test_gradient_modified_huber():
# Test ModifiedHuber
loss = sgd_fast.ModifiedHuber()
cases = [
# (p, y, expected)
(1.0, 1.0, 0.0), (-1.0, -1.0, 0.0), (2.0, 1.0, 0.0),
(0.0, 1.0, -2.0), (-1.0, 1.0, -4.0), (0.5, -1.0, 3.0),
(0.5, -1.0, 3.0), (-2.0, 1.0, -4.0), (-3.0, 1.0, -4.0)
]
_test_gradient_common(loss, cases)
def test_gradient_epsilon_insensitive():
# Test EpsilonInsensitive
loss = sgd_fast.EpsilonInsensitive(0.1)
cases = [
(0.0, 0.0, 0.0), (0.1, 0.0, 0.0), (-2.05, -2.0, 0.0),
(3.05, 3.0, 0.0), (2.2, 2.0, 1.0), (2.0, -1.0, 1.0),
(2.0, 2.2, -1.0), (-2.0, 1.0, -1.0)
]
_test_gradient_common(loss, cases)
def test_gradient_squared_epsilon_insensitive():
# Test SquaredEpsilonInsensitive
loss = sgd_fast.SquaredEpsilonInsensitive(0.1)
cases = [
(0.0, 0.0, 0.0), (0.1, 0.0, 0.0), (-2.05, -2.0, 0.0),
(3.05, 3.0, 0.0), (2.2, 2.0, 0.2), (2.0, -1.0, 5.8),
(2.0, 2.2, -0.2), (-2.0, 1.0, -5.8)
]
_test_gradient_common(loss, cases)
|
mit
|
BitTiger-MP/DS502-AI-Engineer
|
DS502-1702/Jason_course/Week4_Codelab2/class1_linear_regression.py
|
1
|
4115
|
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
# Plotting helper
def plot_line(x, y, y_hat, line_color='blue'):
# Plot outputs
plt.scatter(x, y, color='black')
plt.plot(x, y_hat, color=line_color,
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
# Gradient function: grad = 1/m * sum((y_hat - y) * x)
def linear_grad_func(theta, x, y):
# compute gradient
# np.dot((1 x m), (m x 2))
# output: one partial derivative per component of w; w has shape 1 x 2,
# so grad also has shape 1 x 2
grad = np.dot((linear_val_func(theta, x) - y).T, np.c_[np.ones(x.shape[0]), x])
grad = grad / x.shape[0]
return grad
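# Illustrative sketch, not part of the original tutorial: a finite-difference check of
# the analytic gradient above. Note that linear_grad_func is the gradient of the cost
# 1/(2m) * sum((y_hat - y)^2); the gradient of linear_cost_func below (which has no 1/2
# factor) is exactly twice this value. The helper name `check_linear_grad` is
# hypothetical and only meant for verification.
def check_linear_grad(theta, x, y, eps=1e-6):
    def half_mse(t):
        # cost with the conventional 1/2 factor so its gradient matches linear_grad_func
        y_hat = np.dot(np.c_[np.ones(x.shape[0]), x], t.T)
        return 0.5 * np.mean((y_hat - y) ** 2)
    # central-difference approximation of each partial derivative
    num_grad = np.zeros_like(theta, dtype=float)
    for j in range(theta.shape[1]):
        t_plus, t_minus = theta.copy(), theta.copy()
        t_plus[0, j] += eps
        t_minus[0, j] -= eps
        num_grad[0, j] = (half_mse(t_plus) - half_mse(t_minus)) / (2 * eps)
    return num_grad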
# Forward pass evaluation: y_hat = w^T * x
def linear_val_func(theta, x):
# forwarding
# prepend a bias column of ones as the first column
# output shape: m x 1
return np.dot(np.c_[np.ones(x.shape[0]), x], theta.T)
# Cost function: cost_func = 1/m * sum((y_hat - y)^2)
def linear_cost_func(theta, x, y):
# compute cost (loss)
y_hat = linear_val_func(theta, x)
cost = np.mean((y_hat-y)**2)
return cost
# Gradient descent: theta = theta - alpha * partial_derivative(cost_func)
def linear_grad_desc(theta, X_train, Y_train, lr=0.1, max_iter=10000, converge_change=.001):
cost_iter = []
# compute the initial cost
cost = linear_cost_func(theta, X_train, Y_train)
cost_iter.append([0, cost])
cost_change = 1
i = 1
# Two stopping criteria: 1. stop when the change in cost is small enough
# 2. stop when the number of iterations reaches max_iter (10000 by default)
while cost_change > converge_change and i < max_iter:
pre_cost = cost
# compute the gradient: partial_derivative(cost_func)
grad = linear_grad_func(theta, X_train, Y_train)
# update the parameters: step against the gradient, scaled by the learning rate
# theta is what we want to train
theta = theta - lr * grad
# model is pre-defined: linear
# recompute the cost with the updated parameters
cost = linear_cost_func(theta, X_train, Y_train)
cost_iter.append([i, cost])
# check whether the change in cost meets the convergence threshold
cost_change = abs(cost - pre_cost)
i += 1
return theta, cost_iter
# Linear regression implemented from scratch
def linear_regression():
# load dataset
dataset = datasets.load_diabetes()
# Select only one feature (column index 2)
X = dataset.data[:, 2]
Y = dataset.target
# split dataset into training and testing
X_train = X[:-20, None]
X_test = X[-20:, None]
Y_train = Y[:-20, None]
Y_test = Y[-20:, None]
# Linear regression
theta = np.random.rand(1, X_train.shape[1]+1)
fitted_theta, cost_iter = linear_grad_desc(theta, X_train, Y_train, lr=0.1, max_iter=50000)
print('Coefficients: {}'.format(fitted_theta[0,-1]))
print('Intercept: {}'.format(fitted_theta[0,-2]))
print('MSE: {}'.format(np.sum((linear_val_func(fitted_theta, X_test) - Y_test)**2) / Y_test.shape[0]))
plot_line(X_test, Y_test, linear_val_func(fitted_theta, X_test))
# Linear regression using sklearn
def sklearn_linear_regression():
# load dataset
dataset = datasets.load_diabetes()
# Select only one feature (column index 2)
X = dataset.data[:, 2]
Y = dataset.target
# split dataset into training and testing
X_train = X[:-20, None]
X_test = X[-20:, None]
Y_train = Y[:-20, None]
Y_test = Y[-20:, None]
# Linear regression
regressor = linear_model.LinearRegression()
regressor.fit(X_train, Y_train)
print('Coefficients: {}'.format(regressor.coef_))
print('Intercept: {}'.format(regressor.intercept_))
print('MSE:{}'.format(np.mean((regressor.predict(X_test) - Y_test) ** 2)))
plot_line(X_test, Y_test, regressor.predict(X_test), line_color='red')
def main():
print('Class 1 Linear Regression Example')
linear_regression()
print ('')
print('sklearn Linear Regression Example')
sklearn_linear_regression()
if __name__ == "__main__":
main()
|
apache-2.0
|
kwailamchan/programming-languages
|
python/sklearn/examples/general/pipeline_anova_svm.py
|
3
|
1360
|
#-----------------------------------------------------------#
# Project: Pipeline Anova SVM
# Author: Kelly Chan
# Date: Apr 22 2014
#-----------------------------------------------------------#
print(__doc__)
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import Pipeline
def loadData():
# generating data
X, y = samples_generator.make_classification(n_features=20, \
n_informative=3, \
n_redundant=0, \
n_classes=4, \
n_clusters_per_class=2)
return X, y
# ANOVA SVM-C
def createANOVASVM():
# anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# svm
clf = svm.SVC(kernel='linear')
anova_svm = Pipeline([('anova', anova_filter), \
('svm', clf)])
return anova_svm
def predict(X, y, anova_svm):
anova_svm.fit(X, y)
target = anova_svm.predict(X)
return target
def test():
X, y = loadData()
anova_svm = createANOVASVM()
target = predict(X, y, anova_svm)
print(target)
if __name__ == '__main__':
test()
|
mit
|
nmayorov/scipy
|
scipy/signal/_peak_finding.py
|
4
|
48475
|
"""
Functions for identifying peaks in signals.
"""
import math
import numpy as np
from scipy.signal.wavelets import cwt, ricker
from scipy.stats import scoreatpercentile
from ._peak_finding_utils import (
_local_maxima_1d,
_select_by_peak_distance,
_peak_prominences,
_peak_widths
)
__all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'peak_prominences',
'peak_widths', 'find_peaks', 'find_peaks_cwt']
def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
``comparator(data[n], data[n+1:n+order+1])`` is True.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take two arrays as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n,n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default 'clip'. See numpy.take.
Returns
-------
extrema : ndarray
Boolean array of the same shape as `data` that is True at an extremum,
False otherwise.
See also
--------
argrelmax, argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0)
array([False, False, True, False, False], dtype=bool)
"""
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in range(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return results
def argrelmin(data, axis=0, order=1, mode='clip'):
"""
Calculate the relative minima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative minima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See numpy.take.
Returns
-------
extrema : tuple of ndarrays
Indices of the minima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is 1-D.
See Also
--------
argrelextrema, argrelmax, find_peaks
Notes
-----
This function uses `argrelextrema` with np.less as comparator. Therefore, it
requires a strict inequality on both sides of a value to consider it a
minimum. This means flat minima (more than one sample wide) are not detected.
In case of 1-D `data` `find_peaks` can be used to detect all
local minima, including flat ones, by calling it with negated `data`.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelmin
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelmin(x)
(array([1, 5]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelmin(y, axis=1)
(array([0, 2]), array([2, 1]))
"""
return argrelextrema(data, np.less, axis, order, mode)
def argrelmax(data, axis=0, order=1, mode='clip'):
"""
Calculate the relative maxima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative maxima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is 1-D.
See Also
--------
argrelextrema, argrelmin, find_peaks
Notes
-----
This function uses `argrelextrema` with np.greater as comparator. Therefore,
it requires a strict inequality on both sides of a value to consider it a
maximum. This means flat maxima (more than one sample wide) are not detected.
In case of 1-D `data` `find_peaks` can be used to detect all
local maxima, including flat ones.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelmax
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelmax(x)
(array([3, 6]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelmax(y, axis=1)
(array([0]), array([1]))
"""
return argrelextrema(data, np.greater, axis, order, mode)
def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take two arrays as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default is 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is 1-D.
See Also
--------
argrelmin, argrelmax
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelextrema
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelextrema(x, np.greater)
(array([3, 6]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelextrema(y, np.less, axis=1)
(array([0, 2]), array([2, 1]))
"""
results = _boolrelextrema(data, comparator,
axis, order, mode)
return np.nonzero(results)
def _arg_x_as_expected(value):
"""Ensure argument `x` is a 1-D C-contiguous array of dtype('float64').
Used in `find_peaks`, `peak_prominences` and `peak_widths` to make `x`
compatible with the signature of the wrapped Cython functions.
Returns
-------
value : ndarray
A 1-D C-contiguous array with dtype('float64').
"""
value = np.asarray(value, order='C', dtype=np.float64)
if value.ndim != 1:
raise ValueError('`x` must be a 1-D array')
return value
def _arg_peaks_as_expected(value):
"""Ensure argument `peaks` is a 1-D C-contiguous array of dtype('intp').
Used in `peak_prominences` and `peak_widths` to make `peaks` compatible
with the signature of the wrapped Cython functions.
Returns
-------
value : ndarray
A 1-D C-contiguous array with dtype('intp').
"""
value = np.asarray(value)
if value.size == 0:
# Empty arrays default to np.float64 but are valid input
value = np.array([], dtype=np.intp)
try:
# Safely convert to C-contiguous array of type np.intp
value = value.astype(np.intp, order='C', casting='safe',
subok=False, copy=False)
except TypeError as e:
raise TypeError("cannot safely cast `peaks` to dtype('intp')") from e
if value.ndim != 1:
raise ValueError('`peaks` must be a 1-D array')
return value
def _arg_wlen_as_expected(value):
"""Ensure argument `wlen` is of type `np.intp` and larger than 1.
Used in `peak_prominences` and `peak_widths`.
Returns
-------
value : np.intp
The original `value` rounded up to an integer or -1 if `value` was
None.
"""
if value is None:
# _peak_prominences expects an intp; -1 signals that no value was
# supplied by the user
value = -1
elif 1 < value:
# Round up to a positive integer
if not np.can_cast(value, np.intp, "safe"):
value = math.ceil(value)
value = np.intp(value)
else:
raise ValueError('`wlen` must be larger than 1, was {}'
.format(value))
return value
def peak_prominences(x, peaks, wlen=None):
"""
Calculate the prominence of each peak in a signal.
The prominence of a peak measures how much a peak stands out from the
surrounding baseline of the signal and is defined as the vertical distance
between the peak and its lowest contour line.
Parameters
----------
x : sequence
A signal with peaks.
peaks : sequence
Indices of peaks in `x`.
wlen : int, optional
A window length in samples that optionally limits the evaluated area for
each peak to a subset of `x`. The peak is always placed in the middle of
the window therefore the given length is rounded up to the next odd
integer. This parameter can speed up the calculation (see Notes).
Returns
-------
prominences : ndarray
The calculated prominences for each peak in `peaks`.
left_bases, right_bases : ndarray
The peaks' bases as indices in `x` to the left and right of each peak.
The higher base of each pair is a peak's lowest contour line.
Raises
------
ValueError
If a value in `peaks` is an invalid index for `x`.
Warns
-----
PeakPropertyWarning
For indices in `peaks` that don't point to valid local maxima in `x`,
the returned prominence will be 0 and this warning is raised. This
also happens if `wlen` is smaller than the plateau size of a peak.
Warnings
--------
This function may return unexpected results for data containing NaNs. To
avoid this, NaNs should either be removed or replaced.
See Also
--------
find_peaks
Find peaks inside a signal based on peak properties.
peak_widths
Calculate the width of peaks.
Notes
-----
Strategy to compute a peak's prominence:
1. Extend a horizontal line from the current peak to the left and right
until the line either reaches the window border (see `wlen`) or
intersects the signal again at the slope of a higher peak. An
intersection with a peak of the same height is ignored.
2. On each side find the minimal signal value within the interval defined
above. These points are the peak's bases.
3. The higher one of the two bases marks the peak's lowest contour line. The
prominence can then be calculated as the vertical difference between the
peak's height itself and its lowest contour line.
Searching for the peak's bases can be slow for large `x` with periodic
behavior because large chunks or even the full signal need to be evaluated
for the first algorithmic step. This evaluation area can be limited with the
parameter `wlen` which restricts the algorithm to a window around the
current peak and can shorten the calculation time if the window length is
short in relation to `x`.
However, this may stop the algorithm from finding the true global contour
line if the peak's true bases are outside this window. Instead, a higher
contour line is found within the restricted window leading to a smaller
calculated prominence. In practice, this is only relevant for the highest set
of peaks in `x`. This behavior may even be used intentionally to calculate
"local" prominences.
.. versionadded:: 1.1.0
References
----------
.. [1] Wikipedia Article for Topographic Prominence:
https://en.wikipedia.org/wiki/Topographic_prominence
Examples
--------
>>> from scipy.signal import find_peaks, peak_prominences
>>> import matplotlib.pyplot as plt
Create a test signal with two overlayed harmonics
>>> x = np.linspace(0, 6 * np.pi, 1000)
>>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
Find all peaks and calculate prominences
>>> peaks, _ = find_peaks(x)
>>> prominences = peak_prominences(x, peaks)[0]
>>> prominences
array([1.24159486, 0.47840168, 0.28470524, 3.10716793, 0.284603 ,
0.47822491, 2.48340261, 0.47822491])
Calculate the height of each peak's contour line and plot the results
>>> contour_heights = x[peaks] - prominences
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.vlines(x=peaks, ymin=contour_heights, ymax=x[peaks])
>>> plt.show()
Let's evaluate a second example that demonstrates several edge cases for
one peak at index 5.
>>> x = np.array([0, 1, 0, 3, 1, 3, 0, 4, 0])
>>> peaks = np.array([5])
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
>>> peak_prominences(x, peaks) # -> (prominences, left_bases, right_bases)
(array([3.]), array([2]), array([6]))
Note how the peak at index 3 of the same height is not considered as a
border while searching for the left base. Instead, two minima at 0 and 2
are found in which case the one closer to the evaluated peak is always
chosen. On the right side, however, the base must be placed at 6 because the
higher peak represents the right border to the evaluated area.
>>> peak_prominences(x, peaks, wlen=3.1)
(array([2.]), array([4]), array([6]))
Here, we restricted the algorithm to a window from 3 to 7 (the length is 5
samples because `wlen` was rounded up to the next odd integer). Thus, the
only two candidates in the evaluated area are the two neighboring samples
and a smaller prominence is calculated.
"""
x = _arg_x_as_expected(x)
peaks = _arg_peaks_as_expected(peaks)
wlen = _arg_wlen_as_expected(wlen)
return _peak_prominences(x, peaks, wlen)
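# Illustrative sketch, not part of the SciPy module: a naive pure-NumPy version of the
# three-step prominence strategy described in the Notes of `peak_prominences`, for a
# single peak and without `wlen` support. The name `_naive_prominence` is hypothetical;
# the actual computation is performed by the compiled helper `_peak_prominences`.
def _naive_prominence(x, peak):
    x = np.asarray(x, dtype=np.float64)
    # Step 1: extend a horizontal line at the peak's height to the left and right until
    # a strictly higher sample or the signal border is reached.
    left = peak
    while left > 0 and x[left - 1] <= x[peak]:
        left -= 1
    right = peak
    while right < x.size - 1 and x[right + 1] <= x[peak]:
        right += 1
    # Step 2: the minimal signal value on each side is that side's base.
    left_base = x[left:peak + 1].min()
    right_base = x[peak:right + 1].min()
    # Step 3: the higher base marks the lowest contour line; the prominence is the
    # vertical distance between the peak and that contour line.
    return x[peak] - max(left_base, right_base)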
def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None):
"""
Calculate the width of each peak in a signal.
This function calculates the width of a peak in samples at a relative
distance to the peak's height and prominence.
Parameters
----------
x : sequence
A signal with peaks.
peaks : sequence
Indices of peaks in `x`.
rel_height : float, optional
Chooses the relative height at which the peak width is measured as a
percentage of its prominence. 1.0 calculates the width of the peak at
its lowest contour line while 0.5 evaluates at half the prominence
height. Must be at least 0. See notes for further explanation.
prominence_data : tuple, optional
A tuple of three arrays matching the output of `peak_prominences` when
called with the same arguments `x` and `peaks`. This data is calculated
internally if not provided.
wlen : int, optional
A window length in samples passed to `peak_prominences` as an optional
argument for internal calculation of `prominence_data`. This argument
is ignored if `prominence_data` is given.
Returns
-------
widths : ndarray
The widths for each peak in samples.
width_heights : ndarray
The height of the contour lines at which the `widths` were evaluated.
left_ips, right_ips : ndarray
Interpolated positions of left and right intersection points of a
horizontal line at the respective evaluation height.
Raises
------
ValueError
If `prominence_data` is supplied but doesn't satisfy the condition
``0 <= left_base <= peak <= right_base < x.shape[0]`` for each peak,
has the wrong dtype, is not C-contiguous or does not have the same
shape.
Warns
-----
PeakPropertyWarning
Raised if any calculated width is 0. This may stem from the supplied
`prominence_data` or if `rel_height` is set to 0.
Warnings
--------
This function may return unexpected results for data containing NaNs. To
avoid this, NaNs should either be removed or replaced.
See Also
--------
find_peaks
Find peaks inside a signal based on peak properties.
peak_prominences
Calculate the prominence of peaks.
Notes
-----
The basic algorithm to calculate a peak's width is as follows:
* Calculate the evaluation height :math:`h_{eval}` with the formula
:math:`h_{eval} = h_{Peak} - P \\cdot R`, where :math:`h_{Peak}` is the
height of the peak itself, :math:`P` is the peak's prominence and
:math:`R` a positive ratio specified with the argument `rel_height`.
* Draw a horizontal line at the evaluation height to both sides, starting at
the peak's current vertical position until the lines either intersect a
slope, the signal border or cross the vertical position of the peak's
base (see `peak_prominences` for a definition). For the first case,
intersection with the signal, the true intersection point is estimated
with linear interpolation.
* Calculate the width as the horizontal distance between the chosen
endpoints on both sides. As a consequence of this the maximal possible
width for each peak is the horizontal distance between its bases.
As shown above, to calculate a peak's width, its prominence and bases must be
known. You can supply these yourself with the argument `prominence_data`.
Otherwise, they are internally calculated (see `peak_prominences`).
.. versionadded:: 1.1.0
Examples
--------
>>> from scipy.signal import chirp, find_peaks, peak_widths
>>> import matplotlib.pyplot as plt
Create a test signal with two overlayed harmonics
>>> x = np.linspace(0, 6 * np.pi, 1000)
>>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
Find all peaks and calculate their widths at the relative height of 0.5
(contour line at half the prominence height) and 1 (at the lowest contour
line at full prominence height).
>>> peaks, _ = find_peaks(x)
>>> results_half = peak_widths(x, peaks, rel_height=0.5)
>>> results_half[0] # widths
array([ 64.25172825, 41.29465463, 35.46943289, 104.71586081,
35.46729324, 41.30429622, 181.93835853, 45.37078546])
>>> results_full = peak_widths(x, peaks, rel_height=1)
>>> results_full[0] # widths
array([181.9396084 , 72.99284945, 61.28657872, 373.84622694,
61.78404617, 72.48822812, 253.09161876, 79.36860878])
Plot signal, peaks and contour lines at which the widths were calculated
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.hlines(*results_half[1:], color="C2")
>>> plt.hlines(*results_full[1:], color="C3")
>>> plt.show()
"""
x = _arg_x_as_expected(x)
peaks = _arg_peaks_as_expected(peaks)
if prominence_data is None:
# Calculate prominence if not supplied and use wlen if supplied.
wlen = _arg_wlen_as_expected(wlen)
prominence_data = _peak_prominences(x, peaks, wlen)
return _peak_widths(x, peaks, rel_height, *prominence_data)
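# Illustrative sketch, not part of the SciPy module: width measurement for a single peak
# following the algorithm in the Notes of `peak_widths`. The evaluation height is
# h_eval = h_peak - prominence * rel_height and the crossing points are found by linear
# interpolation. The name `_naive_width` is hypothetical; the actual computation is
# performed by the compiled helper `_peak_widths`.
def _naive_width(x, peak, prominence, left_base, right_base, rel_height=0.5):
    x = np.asarray(x, dtype=np.float64)
    h_eval = x[peak] - prominence * rel_height
    # Walk left until the signal drops below the evaluation height or the left base is
    # reached, then interpolate the true crossing between the two bracketing samples.
    i = peak
    while i > left_base and x[i] > h_eval:
        i -= 1
    left_ip = float(i)
    if x[i] < h_eval:
        left_ip += (h_eval - x[i]) / (x[i + 1] - x[i])
    # Mirror the procedure on the right side.
    j = peak
    while j < right_base and x[j] > h_eval:
        j += 1
    right_ip = float(j)
    if x[j] < h_eval:
        right_ip -= (h_eval - x[j]) / (x[j - 1] - x[j])
    # The width is the horizontal distance between the two interpolated crossings.
    return right_ip - left_ip, h_eval, left_ip, right_ip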
def _unpack_condition_args(interval, x, peaks):
"""
Parse condition arguments for `find_peaks`.
Parameters
----------
interval : number or ndarray or sequence
Either a number or ndarray or a 2-element sequence of the former. The
first value is always interpreted as `imin` and the second, if supplied,
as `imax`.
x : ndarray
The signal with `peaks`.
peaks : ndarray
An array with indices used to reduce `imin` and / or `imax` if those are
arrays.
Returns
-------
imin, imax : number or ndarray or None
Minimal and maximal value in `argument`.
Raises
------
ValueError :
If interval border is given as array and its size does not match the size
of `x`.
Notes
-----
.. versionadded:: 1.1.0
"""
try:
imin, imax = interval
except (TypeError, ValueError):
imin, imax = (interval, None)
# Reduce arrays if arrays
if isinstance(imin, np.ndarray):
if imin.size != x.size:
raise ValueError('array size of lower interval border must match x')
imin = imin[peaks]
if isinstance(imax, np.ndarray):
if imax.size != x.size:
raise ValueError('array size of upper interval border must match x')
imax = imax[peaks]
return imin, imax
def _select_by_property(peak_properties, pmin, pmax):
"""
Evaluate where the generic property of peaks conforms to an interval.
Parameters
----------
peak_properties : ndarray
An array with properties for each peak.
pmin : None or number or ndarray
Lower interval boundary for `peak_properties`. ``None`` is interpreted as
an open border.
pmax : None or number or ndarray
Upper interval boundary for `peak_properties`. ``None`` is interpreted as
an open border.
Returns
-------
keep : bool
A boolean mask evaluating to true where `peak_properties` conforms to the
interval.
See Also
--------
find_peaks
Notes
-----
.. versionadded:: 1.1.0
"""
keep = np.ones(peak_properties.size, dtype=bool)
if pmin is not None:
keep &= (pmin <= peak_properties)
if pmax is not None:
keep &= (peak_properties <= pmax)
return keep
def _select_by_peak_threshold(x, peaks, tmin, tmax):
"""
Evaluate which peaks fulfill the threshold condition.
Parameters
----------
x : ndarray
A 1-D array which is indexable by `peaks`.
peaks : ndarray
Indices of peaks in `x`.
tmin, tmax : scalar or ndarray or None
Minimal and / or maximal required thresholds. If supplied as ndarrays
their size must match `peaks`. ``None`` is interpreted as an open
border.
Returns
-------
keep : bool
A boolean mask evaluating to true where `peaks` fulfill the threshold
condition.
left_thresholds, right_thresholds : ndarray
Array matching `peak` containing the thresholds of each peak on
both sides.
Notes
-----
.. versionadded:: 1.1.0
"""
# Stack thresholds on both sides to make min / max operations easier:
# tmin is compared with the smaller, and tmax with the greater threshold on
# each peak's side
stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1],
x[peaks] - x[peaks + 1]])
keep = np.ones(peaks.size, dtype=bool)
if tmin is not None:
min_thresholds = np.min(stacked_thresholds, axis=0)
keep &= (tmin <= min_thresholds)
if tmax is not None:
max_thresholds = np.max(stacked_thresholds, axis=0)
keep &= (max_thresholds <= tmax)
return keep, stacked_thresholds[0], stacked_thresholds[1]
def find_peaks(x, height=None, threshold=None, distance=None,
prominence=None, width=None, wlen=None, rel_height=0.5,
plateau_size=None):
"""
Find peaks inside a signal based on peak properties.
This function takes a 1-D array and finds all local maxima by
simple comparison of neighboring values. Optionally, a subset of these
peaks can be selected by specifying conditions for a peak's properties.
Parameters
----------
x : sequence
A signal with peaks.
height : number or ndarray or sequence, optional
Required height of peaks. Either a number, ``None``, an array matching
`x` or a 2-element sequence of the former. The first element is
always interpreted as the minimal and the second, if supplied, as the
maximal required height.
threshold : number or ndarray or sequence, optional
Required threshold of peaks, the vertical distance to its neighboring
samples. Either a number, ``None``, an array matching `x` or a
2-element sequence of the former. The first element is always
interpreted as the minimal and the second, if supplied, as the maximal
required threshold.
distance : number, optional
Required minimal horizontal distance (>= 1) in samples between
neighbouring peaks. Smaller peaks are removed first until the condition
is fulfilled for all remaining peaks.
prominence : number or ndarray or sequence, optional
Required prominence of peaks. Either a number, ``None``, an array
matching `x` or a 2-element sequence of the former. The first
element is always interpreted as the minimal and the second, if
supplied, as the maximal required prominence.
width : number or ndarray or sequence, optional
Required width of peaks in samples. Either a number, ``None``, an array
matching `x` or a 2-element sequence of the former. The first
element is always interpreted as the minimal and the second, if
supplied, as the maximal required width.
wlen : int, optional
Used for calculation of the peaks prominences, thus it is only used if
one of the arguments `prominence` or `width` is given. See argument
`wlen` in `peak_prominences` for a full description of its effects.
rel_height : float, optional
Used for calculation of the peaks width, thus it is only used if `width`
is given. See argument `rel_height` in `peak_widths` for a full
description of its effects.
plateau_size : number or ndarray or sequence, optional
Required size of the flat top of peaks in samples. Either a number,
``None``, an array matching `x` or a 2-element sequence of the former.
The first element is always interpreted as the minimal and the second,
if supplied, as the maximal required plateau size.
.. versionadded:: 1.2.0
Returns
-------
peaks : ndarray
Indices of peaks in `x` that satisfy all given conditions.
properties : dict
A dictionary containing properties of the returned peaks which were
calculated as intermediate results during evaluation of the specified
conditions:
* 'peak_heights'
If `height` is given, the height of each peak in `x`.
* 'left_thresholds', 'right_thresholds'
If `threshold` is given, these keys contain a peak's vertical
distance to its neighbouring samples.
* 'prominences', 'right_bases', 'left_bases'
If `prominence` is given, these keys are accessible. See
`peak_prominences` for a description of their content.
* 'width_heights', 'left_ips', 'right_ips'
If `width` is given, these keys are accessible. See `peak_widths`
for a description of their content.
* 'plateau_sizes', 'left_edges', 'right_edges'
If `plateau_size` is given, these keys are accessible and contain
the indices of a peak's edges (edges are still part of the
plateau) and the calculated plateau sizes.
.. versionadded:: 1.2.0
To calculate and return properties without excluding peaks, provide the
open interval ``(None, None)`` as a value to the appropriate argument
(excluding `distance`).
Warns
-----
PeakPropertyWarning
Raised if a peak's properties have unexpected values (see
`peak_prominences` and `peak_widths`).
Warnings
--------
This function may return unexpected results for data containing NaNs. To
avoid this, NaNs should either be removed or replaced.
See Also
--------
find_peaks_cwt
Find peaks using the wavelet transformation.
peak_prominences
Directly calculate the prominence of peaks.
peak_widths
Directly calculate the width of peaks.
Notes
-----
In the context of this function, a peak or local maximum is defined as any
sample whose two direct neighbours have a smaller amplitude. For flat peaks
(more than one sample of equal amplitude wide) the index of the middle
sample is returned (rounded down in case the number of samples is even).
For noisy signals the peak locations can be off because the noise might
change the position of local maxima. In those cases consider smoothing the
signal before searching for peaks or use other peak finding and fitting
methods (like `find_peaks_cwt`).
Some additional comments on specifying conditions:
* Almost all conditions (excluding `distance`) can be given as half-open or
closed intervals, e.g., ``1`` or ``(1, None)`` defines the half-open
interval :math:`[1, \\infty)` while ``(None, 1)`` defines the interval
:math:`(-\\infty, 1]`. The open interval ``(None, None)`` can be specified
as well, which returns the matching properties without exclusion of peaks.
* The border is always included in the interval used to select valid peaks.
* For several conditions the interval borders can be specified with
arrays matching `x` in shape, which enables dynamic constraints based on
the sample position.
* The conditions are evaluated in the following order: `plateau_size`,
`height`, `threshold`, `distance`, `prominence`, `width`. In most cases
this order is the fastest one because faster operations are applied first
to reduce the number of peaks that need to be evaluated later.
* While indices in `peaks` are guaranteed to be at least `distance` samples
apart, edges of flat peaks may be closer than the allowed `distance`.
* Use `wlen` to reduce the time it takes to evaluate the conditions for
`prominence` or `width` if `x` is large or has many local maxima
(see `peak_prominences`).
.. versionadded:: 1.1.0
Examples
--------
To demonstrate this function's usage we use a signal `x` supplied with
SciPy (see `scipy.misc.electrocardiogram`). Let's find all peaks (local
maxima) in `x` whose amplitude lies above 0.
>>> import matplotlib.pyplot as plt
>>> from scipy.misc import electrocardiogram
>>> from scipy.signal import find_peaks
>>> x = electrocardiogram()[2000:4000]
>>> peaks, _ = find_peaks(x, height=0)
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.plot(np.zeros_like(x), "--", color="gray")
>>> plt.show()
We can select peaks below 0 with ``height=(None, 0)`` or use arrays matching
`x` in size to reflect a changing condition for different parts of the
signal.
>>> border = np.sin(np.linspace(0, 3 * np.pi, x.size))
>>> peaks, _ = find_peaks(x, height=(-border, border))
>>> plt.plot(x)
>>> plt.plot(-border, "--", color="gray")
>>> plt.plot(border, ":", color="gray")
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
Another useful condition for periodic signals can be given with the
`distance` argument. In this case, we can easily select the positions of
QRS complexes within the electrocardiogram (ECG) by demanding a distance of
at least 150 samples.
>>> peaks, _ = find_peaks(x, distance=150)
>>> np.diff(peaks)
array([186, 180, 177, 171, 177, 169, 167, 164, 158, 162, 172])
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
Especially for noisy signals, peaks can be easily grouped by their
prominence (see `peak_prominences`). E.g., we can select all peaks except
for the mentioned QRS complexes by limiting the allowed prominence to 0.6.
>>> peaks, properties = find_peaks(x, prominence=(None, 0.6))
>>> properties["prominences"].max()
0.5049999999999999
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
And, finally, let's examine a different section of the ECG which contains
beat forms of different shape. To select only the atypical heart beats, we
combine two conditions: a minimal prominence of 1 and width of at least 20
samples.
>>> x = electrocardiogram()[17000:18000]
>>> peaks, properties = find_peaks(x, prominence=1, width=20)
>>> properties["prominences"], properties["widths"]
(array([1.495, 2.3 ]), array([36.93773946, 39.32723577]))
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.vlines(x=peaks, ymin=x[peaks] - properties["prominences"],
... ymax = x[peaks], color = "C1")
>>> plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"],
... xmax=properties["right_ips"], color = "C1")
>>> plt.show()
"""
# _argmaxima1d expects array of dtype 'float64'
x = _arg_x_as_expected(x)
if distance is not None and distance < 1:
raise ValueError('`distance` must be greater than or equal to 1')
peaks, left_edges, right_edges = _local_maxima_1d(x)
properties = {}
if plateau_size is not None:
# Evaluate plateau size
plateau_sizes = right_edges - left_edges + 1
pmin, pmax = _unpack_condition_args(plateau_size, x, peaks)
keep = _select_by_property(plateau_sizes, pmin, pmax)
peaks = peaks[keep]
properties["plateau_sizes"] = plateau_sizes
properties["left_edges"] = left_edges
properties["right_edges"] = right_edges
properties = {key: array[keep] for key, array in properties.items()}
if height is not None:
# Evaluate height condition
peak_heights = x[peaks]
hmin, hmax = _unpack_condition_args(height, x, peaks)
keep = _select_by_property(peak_heights, hmin, hmax)
peaks = peaks[keep]
properties["peak_heights"] = peak_heights
properties = {key: array[keep] for key, array in properties.items()}
if threshold is not None:
# Evaluate threshold condition
tmin, tmax = _unpack_condition_args(threshold, x, peaks)
keep, left_thresholds, right_thresholds = _select_by_peak_threshold(
x, peaks, tmin, tmax)
peaks = peaks[keep]
properties["left_thresholds"] = left_thresholds
properties["right_thresholds"] = right_thresholds
properties = {key: array[keep] for key, array in properties.items()}
if distance is not None:
# Evaluate distance condition
keep = _select_by_peak_distance(peaks, x[peaks], distance)
peaks = peaks[keep]
properties = {key: array[keep] for key, array in properties.items()}
if prominence is not None or width is not None:
# Calculate prominence (required for both conditions)
wlen = _arg_wlen_as_expected(wlen)
properties.update(zip(
['prominences', 'left_bases', 'right_bases'],
_peak_prominences(x, peaks, wlen=wlen)
))
if prominence is not None:
# Evaluate prominence condition
pmin, pmax = _unpack_condition_args(prominence, x, peaks)
keep = _select_by_property(properties['prominences'], pmin, pmax)
peaks = peaks[keep]
properties = {key: array[keep] for key, array in properties.items()}
if width is not None:
# Calculate widths
properties.update(zip(
['widths', 'width_heights', 'left_ips', 'right_ips'],
_peak_widths(x, peaks, rel_height, properties['prominences'],
properties['left_bases'], properties['right_bases'])
))
# Evaluate width condition
wmin, wmax = _unpack_condition_args(width, x, peaks)
keep = _select_by_property(properties['widths'], wmin, wmax)
peaks = peaks[keep]
properties = {key: array[keep] for key, array in properties.items()}
return peaks, properties
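# A minimal sketch of the interval handling used by `find_peaks` above: every
# condition is reduced to a (min, max) pair and applied as a boolean mask, with
# ``None`` meaning "no border on this side". This helper is illustrative only
# (it ignores the per-sample array borders that `_unpack_condition_args`
# supports) and is not part of the module's API.
def _sketch_select_by_interval(values, interval):
    values = np.asarray(values)
    vmin, vmax = interval if isinstance(interval, tuple) else (interval, None)
    keep = np.ones(values.shape, dtype=bool)
    if vmin is not None:
        keep &= (vmin <= values)
    if vmax is not None:
        keep &= (values <= vmax)
    return keep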
def _identify_ridge_lines(matr, max_distances, gap_thresh):
"""
Identify ridges in the 2-D matrix.
Expect that the width of the wavelet feature increases with increasing row
number.
Parameters
----------
matr : 2-D ndarray
Matrix in which to identify ridge lines.
max_distances : 1-D sequence
At each row, a ridge line is only connected
if the relative max at row[n] is within
`max_distances`[n] from the relative max at row[n+1].
gap_thresh : int
If a relative maximum is not found within `max_distances`,
there will be a gap. A ridge line is discontinued if
there are more than `gap_thresh` points without connecting
a new relative maximum.
Returns
-------
ridge_lines : tuple
Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the
ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none
found. Each ridge-line will be sorted by row (increasing), but the
order of the ridge lines is not specified.
References
----------
.. [1] Bioinformatics (2006) 22 (17): 2059-2065.
:doi:`10.1093/bioinformatics/btl355`
Examples
--------
>>> data = np.random.rand(5,5)
>>> max_distances = np.full(5, 1)
>>> ridge_lines = _identify_ridge_lines(data, max_distances, 1)
Notes
-----
This function is intended to be used in conjunction with `cwt`
as part of `find_peaks_cwt`.
"""
if(len(max_distances) < matr.shape[0]):
raise ValueError('Max_distances must have at least as many rows '
'as matr')
all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
# Highest row for which there are any relative maxima
has_relmax = np.nonzero(all_max_cols.any(axis=1))[0]
if(len(has_relmax) == 0):
return []
start_row = has_relmax[-1]
# Each ridge line is a 3-tuple:
# rows, cols, gap number
ridge_lines = [[[start_row],
[col],
0] for col in np.nonzero(all_max_cols[start_row])[0]]
final_lines = []
rows = np.arange(start_row - 1, -1, -1)
cols = np.arange(0, matr.shape[1])
for row in rows:
this_max_cols = cols[all_max_cols[row]]
# Increment gap number of each line,
# set it to zero later if appropriate
for line in ridge_lines:
line[2] += 1
# XXX These should always be all_max_cols[row]
# But the order might be different. Might be an efficiency gain
# to make sure the order is the same and avoid this iteration
prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
# Look through every relative maximum found at current row
# Attempt to connect them with existing ridge lines.
for ind, col in enumerate(this_max_cols):
# If there is a previous ridge line within
# the max_distance to connect to, do so.
# Otherwise start a new one.
line = None
if(len(prev_ridge_cols) > 0):
diffs = np.abs(col - prev_ridge_cols)
closest = np.argmin(diffs)
if diffs[closest] <= max_distances[row]:
line = ridge_lines[closest]
if(line is not None):
# Found a point close enough, extend current ridge line
line[1].append(col)
line[0].append(row)
line[2] = 0
else:
new_line = [[row],
[col],
0]
ridge_lines.append(new_line)
# Remove the ridge lines with gap_number too high
# XXX Modifying a list while iterating over it.
# Should be safe, since we iterate backwards, but
# still tacky.
for ind in range(len(ridge_lines) - 1, -1, -1):
line = ridge_lines[ind]
if line[2] > gap_thresh:
final_lines.append(line)
del ridge_lines[ind]
out_lines = []
for line in (final_lines + ridge_lines):
sortargs = np.array(np.argsort(line[0]))
rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
rows[sortargs] = line[0]
cols[sortargs] = line[1]
out_lines.append([rows, cols])
return out_lines
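# A minimal usage sketch: each ridge line returned above is a ``[rows, cols]``
# pair sorted by row, so the column of its smallest-row point (the value that
# `find_peaks_cwt` below reports as a peak location) is simply ``line[1][0]``.
# The helper name is illustrative only and not part of the module's API.
def _sketch_ridge_line_start_columns(matr, max_distances, gap_thresh):
    ridge_lines = _identify_ridge_lines(matr, max_distances, gap_thresh)
    return [line[1][0] for line in ridge_lines]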
def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
min_snr=1, noise_perc=10):
"""
Filter ridge lines according to prescribed criteria. Intended
to be used for finding relative maxima.
Parameters
----------
cwt : 2-D ndarray
Continuous wavelet transform from which the `ridge_lines` were defined.
ridge_lines : 1-D sequence
Each element should contain 2 sequences, the rows and columns
of the ridge line (respectively).
window_size : int, optional
Size of window to use to calculate noise floor.
Default is ``cwt.shape[1] / 20``.
min_length : int, optional
Minimum length a ridge line needs to be acceptable.
Default is ``cwt.shape[0] / 4``, i.e. one fourth of the number of widths.
min_snr : float, optional
Minimum SNR ratio. Default 1. The signal is the value of
the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
noise is the `noise_perc`th percentile of datapoints contained within a
window of `window_size` around ``cwt[0, loc]``.
noise_perc : float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
scipy.stats.scoreatpercentile.
References
----------
.. [1] Bioinformatics (2006) 22 (17): 2059-2065.
:doi:`10.1093/bioinformatics/btl355`
"""
num_points = cwt.shape[1]
if min_length is None:
min_length = np.ceil(cwt.shape[0] / 4)
if window_size is None:
window_size = np.ceil(num_points / 20)
window_size = int(window_size)
hf_window, odd = divmod(window_size, 2)
# Filter based on SNR
row_one = cwt[0, :]
noises = np.empty_like(row_one)
for ind, val in enumerate(row_one):
window_start = max(ind - hf_window, 0)
window_end = min(ind + hf_window + odd, num_points)
noises[ind] = scoreatpercentile(row_one[window_start:window_end],
per=noise_perc)
def filt_func(line):
if len(line[0]) < min_length:
return False
snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
if snr < min_snr:
return False
return True
return list(filter(filt_func, ridge_lines))
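# A minimal sketch of the windowed noise floor computed in `_filter_ridge_lines`
# above: for every sample of the finest-scale row, the noise estimate is the
# `noise_perc`-th percentile of a `window_size`-wide neighbourhood. The helper
# name and default values are illustrative only.
def _sketch_noise_floor(row, window_size=10, noise_perc=10):
    row = np.asarray(row)
    hf_window, odd = divmod(int(window_size), 2)
    noises = np.empty_like(row)
    for ind in range(len(row)):
        window_start = max(ind - hf_window, 0)
        window_end = min(ind + hf_window + odd, len(row))
        noises[ind] = scoreatpercentile(row[window_start:window_end],
                                        per=noise_perc)
    return noises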
def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
gap_thresh=None, min_length=None,
min_snr=1, noise_perc=10, window_size=None):
"""
Find peaks in a 1-D array with wavelet transformation.
The general approach is to smooth `vector` by convolving it with
`wavelet(width)` for each width in `widths`. Relative maxima which
appear at enough length scales, and with sufficiently high SNR, are
accepted.
Parameters
----------
vector : ndarray
1-D array in which to find the peaks.
widths : sequence
1-D array of widths to use for calculating the CWT matrix. In general,
this range should cover the expected width of peaks of interest.
wavelet : callable, optional
Should take two parameters and return a 1-D array to convolve
with `vector`. The first parameter determines the number of points
of the returned wavelet array, the second parameter is the scale
(`width`) of the wavelet. Should be normalized and symmetric.
Default is the ricker wavelet.
max_distances : ndarray, optional
At each row, a ridge line is only connected if the relative max at
row[n] is within ``max_distances[n]`` from the relative max at
``row[n+1]``. Default value is ``widths/4``.
gap_thresh : float, optional
If a relative maximum is not found within `max_distances`,
there will be a gap. A ridge line is discontinued if there are more
than `gap_thresh` points without connecting a new relative maximum.
Default is ``np.ceil(widths[0])``, i.e. the ceiling of the first value of the widths array.
min_length : int, optional
Minimum length a ridge line needs to be acceptable.
Default is ``cwt.shape[0] / 4``, i.e. one fourth of the number of widths.
min_snr : float, optional
Minimum SNR ratio. Default 1. The signal is the value of
the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
noise is the `noise_perc`th percentile of datapoints contained within a
window of `window_size` around ``cwt[0, loc]``.
noise_perc : float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
`stats.scoreatpercentile`. Default is 10.
window_size : int, optional
Size of window to use to calculate noise floor.
Default is ``cwt.shape[1] / 20``.
Returns
-------
peaks_indices : ndarray
Indices of the locations in the `vector` where peaks were found.
The list is sorted.
See Also
--------
cwt
Continuous wavelet transform.
find_peaks
Find peaks inside a signal based on peak properties.
Notes
-----
This approach was designed for finding sharp peaks among noisy data,
however with proper parameter selection it should function well for
different peak shapes.
The algorithm is as follows:
1. Perform a continuous wavelet transform on `vector`, for the supplied
`widths`. This is a convolution of `vector` with `wavelet(width)` for
each width in `widths`. See `cwt`.
2. Identify "ridge lines" in the cwt matrix. These are relative maxima
at each row, connected across adjacent rows. See identify_ridge_lines
3. Filter the ridge_lines using filter_ridge_lines.
.. versionadded:: 0.11.0
References
----------
.. [1] Bioinformatics (2006) 22 (17): 2059-2065.
:doi:`10.1093/bioinformatics/btl355`
Examples
--------
>>> from scipy import signal
>>> xs = np.arange(0, np.pi, 0.05)
>>> data = np.sin(xs)
>>> peakind = signal.find_peaks_cwt(data, np.arange(1,10))
>>> peakind, xs[peakind], data[peakind]
([32], array([ 1.6]), array([ 0.9995736]))
"""
widths = np.asarray(widths)
if gap_thresh is None:
gap_thresh = np.ceil(widths[0])
if max_distances is None:
max_distances = widths / 4.0
if wavelet is None:
wavelet = ricker
cwt_dat = cwt(vector, wavelet, widths, window_size=window_size)
ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length,
window_size=window_size, min_snr=min_snr,
noise_perc=noise_perc)
max_locs = np.asarray([x[1][0] for x in filtered])
max_locs.sort()
return max_locs
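# A minimal usage sketch with the defaults applied above spelled out explicitly;
# the helper name is illustrative and the values are not recommendations.
def _sketch_find_peaks_cwt_defaults(vector, widths):
    widths = np.asarray(widths)
    return find_peaks_cwt(vector, widths,
                          wavelet=ricker,                 # default wavelet
                          max_distances=widths / 4.0,     # default connectivity
                          gap_thresh=np.ceil(widths[0]),  # default gap threshold
                          min_snr=1, noise_perc=10)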
|
bsd-3-clause
|
cchwala/pycomlink
|
pycomlink/util/temporal.py
|
2
|
2643
|
import pandas as pd
def aggregate_df_onto_DatetimeIndex(
df, new_index, method, label="right", new_index_tz="utc"
):
"""
Aggregate a DataFrame or Series using a given DatetimeIndex
Parameters
----------
df : pandas.DataFrame
The dataframe that should be reindexed
new_index : pandas.DatetimeIndex
The time stamp index on which `df` should be aggregated
method : numpy function
The function to be used for aggregation via
`DataFrame.groupby('new_time_ix').agg(method)`
label : str {'right', 'left'}, optional
Which side of the aggregated period to take the label for the new
index from
new_index_tz : str, optional
Defaults to 'utc'. Note that if `new_index` already has time zone
information, this kwarg is ignored
Returns
-------
df_reindexed : pandas.DataFrame
"""
if label == "right":
fill_method = "bfill"
elif label == "left":
fill_method = "ffill"
else:
raise NotImplementedError('`label` must be "left" or "right"')
# Make sure we work with a DataFrame and make a copy of it
df_temp = pd.DataFrame(df).copy()
# Generate DataFrame with desired DatetimeIndex as data,
# which will later be reindexed by DatetimeIndex of original DataFrame
df_new_t = pd.DataFrame(index=new_index, data={"time": new_index})
# Update time zone info if there is none
if not df_new_t.index.tzinfo:
df_new_t.index = df_new_t.index.tz_localize(new_index_tz)
# Crop both time series so that they cover the same period.
# This avoids the ffill or bfill running outside the range of the
# new index, which would produce wrong results for the end points of
# the time series in the aggregated result.
t_start = max(df_temp.index.min(), df_new_t.index.min())
t_stop = min(df_temp.index.max(), df_new_t.index.max())
df_new_t = df_new_t.loc[t_start:t_stop]
df_temp = df_temp.loc[t_start:t_stop]
# Reindex to get the forward filled or backward filled time stamp of the
# new index which can be used for aggregation in the next step
df_new_t = df_new_t.reindex(df_temp.index, method=fill_method)
# Aggregate data onto new DatetimeIndex
df_temp["new_time_ix"] = df_new_t.time
df_reindexed = df_temp.groupby("new_time_ix").agg(method)
# Update name and timezone of new index
df_reindexed.index.name = df_temp.index.name
if not df_reindexed.index.tzinfo:
df_reindexed.index = df_reindexed.index.tz_localize("UTC").tz_convert(
df_temp.index.tzinfo
)
return df_reindexed
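# A minimal usage sketch, assuming a one-minute series aggregated onto a
# five-minute index with `numpy.mean`; the column name and time range are
# illustrative only.
def _example_aggregate_onto_5min_index():
    import numpy as np
    t = pd.date_range('2020-01-01', periods=60, freq='1min', tz='utc')
    df = pd.DataFrame({'rain_rate': np.arange(60.0)}, index=t)
    new_index = pd.date_range('2020-01-01', periods=12, freq='5min', tz='utc')
    return aggregate_df_onto_DatetimeIndex(
        df, new_index, method=np.mean, label='right')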
|
bsd-3-clause
|
lukasmarshall/embedded-network-model
|
luke_combine_excel_files.py
|
1
|
1933
|
import os
import glob
import pandas as pd
from os import listdir
from os.path import isfile, join
import csv
from collections import OrderedDict
def concatenate(input_directory="/mnt/c/Users/Emily/Documents/Test",outfile="concatenated.csv"):
# os.chdir(input_directory)
# fileList=glob.glob("*.csv")
dfList=[]
# for filename in fileList:
# print(filename)
# df=pandas.read_csv(filename,header=True)
# dfList.append(df)
files = [f for f in listdir(input_directory) if isfile(join(input_directory, f))]
print(files)
output = OrderedDict()
participant_names = []
for file in files:
file_path = input_directory+"/"+file
# Open the file
with open(file_path) as f:
# Create a csv reader object
reader = csv.DictReader(f)
# Loop through every line of the csv file and merge it into the
# ordered `output` dict created above
for line in reader:
# Get the date as a string
date = line['date_time']
# If the date is not in our output object, add it and add the relevant data.
output[date] = {} if date not in output else output[date]
# Update the line in our output object with the SGSC data
output[date].update(line)
# Building a list of participant names
del line['date_time']
for key in line:
if key not in participant_names:
participant_names.append(key)
print(participant_names)
participant_names.insert(0, 'date_time')
with open(outfile, "w") as f:
writer = csv.DictWriter(f, participant_names)
writer.writeheader()
for date in output:
writer.writerow(output[date])
concatenate()
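# A minimal alternative sketch using pandas only, joining every file on its
# 'date_time' column; the directory and output paths are placeholders.
def concatenate_with_pandas(input_directory, outfile="concatenated_pandas.csv"):
    paths = [join(input_directory, f) for f in listdir(input_directory)
             if isfile(join(input_directory, f))]
    frames = [pd.read_csv(p, index_col='date_time') for p in paths]
    pd.concat(frames, axis=1).to_csv(outfile)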
|
mit
|
Arcanemagus/plexpy
|
lib/tqdm/_tqdm_gui.py
|
4
|
13326
|
"""
GUI progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm_gui import tgrange[, tqdm_gui]
>>> for i in tgrange(10): #same as: for i in tqdm_gui(xrange(10))
... ...
"""
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
# import sys
from time import time
from ._utils import _range
# to inherit from the tqdm class
from ._tqdm import tqdm, TqdmExperimentalWarning
from warnings import warn
__author__ = {"github.com/": ["casperdcl", "lrq3000"]}
__all__ = ['tqdm_gui', 'tgrange']
class tqdm_gui(tqdm): # pragma: no cover
"""
Experimental GUI version of tqdm!
"""
# TODO: @classmethod: write() on GUI?
def __init__(self, *args, **kwargs):
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import deque
kwargs['gui'] = True
super(tqdm_gui, self).__init__(*args, **kwargs)
# Initialize the GUI display
if self.disable or not kwargs['gui']:
return
warn('GUI is experimental/alpha', TqdmExperimentalWarning)
self.mpl = mpl
self.plt = plt
self.sp = None
# Remember if external environment uses toolbars
self.toolbar = self.mpl.rcParams['toolbar']
self.mpl.rcParams['toolbar'] = 'None'
self.mininterval = max(self.mininterval, 0.5)
self.fig, ax = plt.subplots(figsize=(9, 2.2))
# self.fig.subplots_adjust(bottom=0.2)
if self.total:
self.xdata = []
self.ydata = []
self.zdata = []
else:
self.xdata = deque([])
self.ydata = deque([])
self.zdata = deque([])
self.line1, = ax.plot(self.xdata, self.ydata, color='b')
self.line2, = ax.plot(self.xdata, self.zdata, color='k')
ax.set_ylim(0, 0.001)
if self.total:
ax.set_xlim(0, 100)
ax.set_xlabel('percent')
self.fig.legend((self.line1, self.line2), ('cur', 'est'),
loc='center right')
# progressbar
self.hspan = plt.axhspan(0, 0.001,
xmin=0, xmax=0, color='g')
else:
# ax.set_xlim(-60, 0)
ax.set_xlim(0, 60)
ax.invert_xaxis()
ax.set_xlabel('seconds')
ax.legend(('cur', 'est'), loc='lower left')
ax.grid()
# ax.set_xlabel('seconds')
ax.set_ylabel((self.unit if self.unit else 'it') + '/s')
if self.unit_scale:
plt.ticklabel_format(style='sci', axis='y',
scilimits=(0, 0))
ax.yaxis.get_offset_text().set_x(-0.15)
# Remember if external environment is interactive
self.wasion = plt.isinteractive()
plt.ion()
self.ax = ax
def __iter__(self):
# TODO: somehow allow the following:
# if not self.gui:
# return super(tqdm_gui, self).__iter__()
iterable = self.iterable
if self.disable:
for obj in iterable:
yield obj
return
# ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
# dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
bar_format = self.bar_format
plt = self.plt
ax = self.ax
xdata = self.xdata
ydata = self.ydata
zdata = self.zdata
line1 = self.line1
line2 = self.line2
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
delta_it = n - last_print_n
# check the counter first (avoid calls to time())
if delta_it >= miniters:
cur_t = time()
delta_t = cur_t - last_print_t
if delta_t >= mininterval:
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
# Inline due to multiple calls
total = self.total
# instantaneous rate
y = delta_it / delta_t
# overall rate
z = n / elapsed
# update line data
xdata.append(n * 100.0 / total if total else cur_t)
ydata.append(y)
zdata.append(z)
# Discard old values
# xmin, xmax = ax.get_xlim()
# if (not total) and elapsed > xmin * 1.1:
if (not total) and elapsed > 66:
xdata.popleft()
ydata.popleft()
zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
line1.set_data(xdata, ydata)
line2.set_data(xdata, zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in xdata]
line1.set_data(t_ago, ydata)
line2.set_data(t_ago, zdata)
ax.set_title(self.format_meter(
n, total, elapsed, 0,
self.desc, ascii, unit, unit_scale,
1 / avg_time if avg_time else None, bar_format),
fontname="DejaVu Sans Mono", fontsize=11)
plt.pause(1e-9)
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * mininterval \
/ delta_t + (1 - smoothing) * miniters
else:
miniters = smoothing * delta_it + \
(1 - smoothing) * miniters
# Store old values for next call
last_print_n = n
last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
def update(self, n=1):
# if not self.gui:
# return super(tqdm_gui, self).close()
if self.disable:
return
if n < 0:
n = 1
self.n += n
delta_it = self.n - self.last_print_n # should be n?
if delta_it >= self.miniters:
# We check the counter first, to reduce the overhead of time()
cur_t = time()
delta_t = cur_t - self.last_print_t
if delta_t >= self.mininterval:
elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
# Inline due to multiple calls
total = self.total
ax = self.ax
# instantaneous rate
y = delta_it / delta_t
# smoothed rate
z = self.n / elapsed
# update line data
self.xdata.append(self.n * 100.0 / total
if total else cur_t)
self.ydata.append(y)
self.zdata.append(z)
# Discard old values
if (not total) and elapsed > 66:
self.xdata.popleft()
self.ydata.popleft()
self.zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
self.line1.set_data(self.xdata, self.ydata)
self.line2.set_data(self.xdata, self.zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = self.plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [self.n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in self.xdata]
self.line1.set_data(t_ago, self.ydata)
self.line2.set_data(t_ago, self.zdata)
ax.set_title(self.format_meter(
self.n, total, elapsed, 0,
self.desc, self.ascii, self.unit, self.unit_scale,
1 / self.avg_time if self.avg_time else None,
self.bar_format),
fontname="DejaVu Sans Mono", fontsize=11)
self.plt.pause(1e-9)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t > self.maxinterval:
self.miniters = self.miniters * self.maxinterval \
/ delta_t
elif self.mininterval and delta_t:
self.miniters = self.smoothing * delta_it \
* self.mininterval / delta_t + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = self.smoothing * delta_it + \
(1 - self.smoothing) * self.miniters
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
# if not self.gui:
# return super(tqdm_gui, self).close()
if self.disable:
return
self.disable = True
self._instances.remove(self)
# Restore toolbars
self.mpl.rcParams['toolbar'] = self.toolbar
# Return to non-interactive mode
if not self.wasion:
self.plt.ioff()
if not self.leave:
self.plt.close(self.fig)
def tgrange(*args, **kwargs):
"""
A shortcut for tqdm_gui(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm_gui(_range(*args), **kwargs)
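# A minimal usage sketch, assuming an interactive matplotlib backend is
# available; the loop body is a placeholder workload.
def _example_tgrange(total=100):
    from time import sleep
    for _ in tgrange(total, desc='demo'):
        sleep(0.01)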
|
gpl-3.0
|
adammenges/statsmodels
|
statsmodels/datasets/nile/data.py
|
25
|
1872
|
"""Nile River Flows."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = """Nile River flows at Ashwan 1871-1970"""
SOURCE = """
This data is first analyzed in:
Cobb, G. W. 1978. "The Problem of the Nile: Conditional Solution to a
Changepoint Problem." *Biometrika*. 65.2, 243-51.
"""
DESCRSHORT = """This dataset contains measurements on the annual flow of
the Nile as measured at Ashwan for 100 years from 1871-1970."""
DESCRLONG = DESCRSHORT + " There is an apparent changepoint near 1898."
#suggested notes
NOTE = """::
Number of observations: 100
Number of variables: 2
Variable name definitions:
year - the year of the observations
volume - the discharge at Aswan in 10^8 m^3
"""
from numpy import recfromtxt, array
from pandas import Series, DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the Nile data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
names = list(data.dtype.names)
endog_name = 'volume'
endog = array(data[endog_name], dtype=float)
dataset = Dataset(data=data, names=[endog_name], endog=endog,
endog_name=endog_name)
return dataset
def load_pandas():
data = DataFrame(_get_data())
# TODO: time series
endog = Series(data['volume'], index=data['year'].astype(int))
dataset = Dataset(data=data, names=list(data.columns),
endog=endog, endog_name='volume')
return dataset
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/nile.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
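# A minimal usage sketch: load the pandas version and look at the annual flows
# around the apparent 1898 changepoint mentioned above.
def _example_inspect_changepoint():
    dataset = load_pandas()
    return dataset.endog.loc[1895:1900]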
|
bsd-3-clause
|
Garrett-R/scikit-learn
|
sklearn/cluster/tests/test_spectral.py
|
7
|
8173
|
"""Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import re
import warnings
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", UserWarning)
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
assert_true(re.search(r'\bnot fully connected\b',
str(warning_list[0].message)))
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
"""Histogram kernel implemented as a callable."""
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
|
bsd-3-clause
|
cycleuser/GeoPython
|
geopytool/GLMultiDimension.py
|
2
|
23440
|
from geopytool.ImportDependence import *
from geopytool.CustomClass import *
from PyQt5 import QtGui # (the example applies equally well to PySide)
import pyqtgraph as pg
import pandas as pd
import pyqtgraph.opengl as gl
import numpy as np
class GLMultiDimension(AppForm):
Element = [u'Cs', u'Tl', u'Rb', u'Ba', u'W', u'Th', u'U', u'Nb', u'Ta', u'K', u'La', u'Ce', u'Pb', u'Pr', u'Mo',
u'Sr', u'P', u'Nd', u'F', u'Sm', u'Zr', u'Hf', u'Eu', u'Sn', u'Sb', u'Ti', u'Gd', u'Tb', u'Dy',
u'Li',
u'Y', u'Ho', u'Er', u'Tm', u'Yb', u'Lu']
StandardsName = ['OIB', 'EMORB', 'C1', 'PM', 'NMORB']
NameChosen = 'OIB'
Standards = {
'OIB': {'Cs': 0.387, 'Tl': 0.077, 'Rb': 31, 'Ba': 350, 'W': 0.56, 'Th': 4, 'U': 1.02, 'Nb': 48, 'Ta': 2.7,
'K': 12000, 'La': 37, 'Ce': 80, 'Pb': 3.2, 'Pr': 9.7, 'Mo': 2.4, 'Sr': 660, 'P': 2700, 'Nd': 38.5,
'F': 1150, 'Sm': 10, 'Zr': 280, 'Hf': 7.8, 'Eu': 3, 'Sn': 2.7, 'Sb': 0.03, 'Ti': 17200, 'Gd': 7.62,
'Tb': 1.05, 'Dy': 5.6, 'Li': 5.6, 'Y': 29, 'Ho': 1.06, 'Er': 2.62, 'Tm': 0.35, 'Yb': 2.16, 'Lu': 0.3},
'EMORB': {'Cs': 0.063, 'Tl': 0.013, 'Rb': 5.04, 'Ba': 57, 'W': 0.092, 'Th': 0.6, 'U': 0.18, 'Nb': 8.3,
'Ta': 0.47, 'K': 2100, 'La': 6.3, 'Ce': 15, 'Pb': 0.6, 'Pr': 2.05, 'Mo': 0.47, 'Sr': 155, 'P': 620,
'Nd': 9, 'F': 250, 'Sm': 2.6, 'Zr': 73, 'Hf': 2.03, 'Eu': 0.91, 'Sn': 0.8, 'Sb': 0.01, 'Ti': 6000,
'Gd': 2.97, 'Tb': 0.53, 'Dy': 3.55, 'Li': 3.5, 'Y': 22, 'Ho': 0.79, 'Er': 2.31, 'Tm': 0.356,
'Yb': 2.37, 'Lu': 0.354},
'C1': {'Cs': 0.188, 'Tl': 0.14, 'Rb': 2.32, 'Ba': 2.41, 'W': 0.095, 'Th': 0.029, 'U': 0.008, 'Nb': 0.246,
'Ta': 0.014, 'K': 545, 'La': 0.237, 'Ce': 0.612, 'Pb': 2.47, 'Pr': 0.095, 'Mo': 0.92, 'Sr': 7.26,
'P': 1220, 'Nd': 0.467, 'F': 60.7, 'Sm': 0.153, 'Zr': 3.87, 'Hf': 0.1066, 'Eu': 0.058, 'Sn': 1.72,
'Sb': 0.16, 'Ti': 445, 'Gd': 0.2055, 'Tb': 0.0374, 'Dy': 0.254, 'Li': 1.57, 'Y': 1.57, 'Ho': 0.0566,
'Er': 0.1655, 'Tm': 0.0255, 'Yb': 0.17, 'Lu': 0.0254},
'PM': {'Cs': 0.032, 'Tl': 0.005, 'Rb': 0.635, 'Ba': 6.989, 'W': 0.02, 'Th': 0.085, 'U': 0.021, 'Nb': 0.713,
'Ta': 0.041, 'K': 250, 'La': 0.687, 'Ce': 1.775, 'Pb': 0.185, 'Pr': 0.276, 'Mo': 0.063, 'Sr': 21.1,
'P': 95, 'Nd': 1.354, 'F': 26, 'Sm': 0.444, 'Zr': 11.2, 'Hf': 0.309, 'Eu': 0.168, 'Sn': 0.17,
'Sb': 0.005, 'Ti': 1300, 'Gd': 0.596, 'Tb': 0.108, 'Dy': 0.737, 'Li': 1.6, 'Y': 4.55, 'Ho': 0.164,
'Er': 0.48, 'Tm': 0.074, 'Yb': 0.493, 'Lu': 0.074},
'NMORB': {'Cs': 0.007, 'Tl': 0.0014, 'Rb': 0.56, 'Ba': 6.3, 'W': 0.01, 'Th': 0.12, 'U': 0.047, 'Nb': 2.33,
'Ta': 0.132, 'K': 600, 'La': 2.5, 'Ce': 7.5, 'Pb': 0.3, 'Pr': 1.32, 'Mo': 0.31, 'Sr': 90, 'P': 510,
'Nd': 7.3, 'F': 210, 'Sm': 2.63, 'Zr': 74, 'Hf': 2.05, 'Eu': 1.02, 'Sn': 1.1, 'Sb': 0.01, 'Ti': 7600,
'Gd': 3.68, 'Tb': 0.67, 'Dy': 4.55, 'Li': 4.3, 'Y': 28, 'Ho': 1.01, 'Er': 2.97, 'Tm': 0.456,
'Yb': 3.05, 'Lu': 0.455}, }
Lines = []
Tags = []
xlabel = 'x'
ylabel = 'y'
zlabel = 'z'
description = 'X-Y- diagram'
unuseful = ['Name',
'Author',
'DataType',
'Label',
'Marker',
'Color',
'Size',
'Alpha',
'Style',
'Width',
'Type',
'Tag']
width_plot = 100.0
height_plot = 100.0
depth_plot= 100.0
width_load = width_plot
height_load = height_plot
depth_load = depth_plot
polygon = []
polyline = []
line = []
strgons = []
strlines = []
strpolylines = []
extent = 0
Left = 0
Right = 0
Up = 0
Down = 0
FitLevel=3
FadeGroups=100
ShapeGroups=200
Xleft,Xright,Ydown,Yup,Ztail,Zhead=0,0,0,0,0,0
LimSet= False
def __init__(self, parent=None, df=pd.DataFrame()):
QMainWindow.__init__(self, parent)
self.setWindowTitle(self.description)
self.items = []
self._df = df
if (len(df) > 0):
self._changed = True
# print('DataFrame received by GLMultiDimension')
self.raw = df
self.rawitems = self.raw.columns.values.tolist()
for i in self.rawitems:
if i not in self.unuseful:
self.items.append(i)
else:
pass
self.create_main_frame()
self.create_status_bar()
self.polygon = 0
self.polyline = 0
self.flag = 0
def create_main_frame(self):
self.main_frame = QWidget()
#self.main_frame.setFixedSize(self.width(), self.width())
self.dpi = 128
self.ShapeGroups =200
self.view = gl.GLViewWidget()
#self.view = pg.PlotWidget()
#self.view.setFixedSize(self.width(),self.height())
self.view.setFixedSize(self.width(), self.width())
self.view.setParent(self.main_frame)
# Other GUI controls
self.save_button = QPushButton('&Save')
self.save_button.clicked.connect(self.saveImgFile)
self.draw_button = QPushButton('&Reset')
self.draw_button.clicked.connect(self.Reset)
self.load_button = QPushButton('&Load')
#self.load_button.clicked.connect(self.Load)
self.fit_cb= QCheckBox('&PolyFit')
self.fit_cb.setChecked(False)
self.fit_cb.stateChanged.connect(self.Magic) # int
self.fit_label = QLabel('Exp')
self.fit_seter = QLineEdit(self)
self.fit_seter.textChanged[str].connect(self.FitChanged)
self.shape_cb= QCheckBox('&Shape')
self.shape_cb.setChecked(False)
self.shape_cb.stateChanged.connect(self.Magic) # int
self.Normalize_cb = QCheckBox('&Normalize')
self.Normalize_cb.setChecked(False)
self.Normalize_cb.stateChanged.connect(self.Magic) # int
self.norm_slider_label = QLabel('Standard:' + self.NameChosen)
self.norm_slider = QSlider(Qt.Horizontal)
self.norm_slider.setRange(0, 4)
self.norm_slider.setValue(0)
self.norm_slider.setTracking(True)
self.norm_slider.setTickPosition(QSlider.TicksBothSides)
self.norm_slider.valueChanged.connect(self.Magic) # int
self.x_element = QSlider(Qt.Horizontal)
self.x_element.setRange(0, len(self.items) - 1)
self.x_element.setValue(0)
self.x_element.setTracking(True)
self.x_element.setTickPosition(QSlider.TicksBothSides)
self.x_element.valueChanged.connect(self.Magic) # int
self.x_element_label = QLabel('X')
self.logx_cb = QCheckBox('&Log')
self.logx_cb.setChecked(False)
self.logx_cb.stateChanged.connect(self.Magic) # int
self.y_element = QSlider(Qt.Horizontal)
self.y_element.setRange(0, len(self.items) - 1)
self.y_element.setValue(1)
self.y_element.setTracking(True)
self.y_element.setTickPosition(QSlider.TicksBothSides)
self.y_element.valueChanged.connect(self.Magic) # int
self.y_element_label = QLabel('Y')
self.logy_cb = QCheckBox('&Log')
self.logy_cb.setChecked(False)
self.logy_cb.stateChanged.connect(self.Magic) # int
self.z_element = QSlider(Qt.Horizontal)
self.z_element.setRange(0, len(self.items) - 1)
self.z_element.setValue(2)
self.z_element.setTracking(True)
self.z_element.setTickPosition(QSlider.TicksBothSides)
self.z_element.valueChanged.connect(self.Magic) # int
self.z_element_label = QLabel('Z')
self.logz_cb = QCheckBox('&Log')
self.logz_cb.setChecked(False)
self.logz_cb.stateChanged.connect(self.Magic) # int
self.xlim_seter_left_label = QLabel('Xleft')
self.xlim_seter_left = QLineEdit(self)
self.xlim_seter_left.textChanged[str].connect(self.XleftChanged)
self.xlim_seter_right_label = QLabel('Xright')
self.xlim_seter_right = QLineEdit(self)
self.xlim_seter_right.textChanged[str].connect(self.XrightChanged)
self.ylim_seter_down_label = QLabel('Ydown')
self.ylim_seter_down = QLineEdit(self)
self.ylim_seter_down.textChanged[str].connect(self.YdownChanged)
self.ylim_seter_up_label = QLabel('Yup')
self.ylim_seter_up = QLineEdit(self)
self.ylim_seter_up.textChanged[str].connect(self.YupChanged)
self.hbox0 = QHBoxLayout()
self.hbox1 = QHBoxLayout()
self.hbox2 = QHBoxLayout()
self.hbox3 = QHBoxLayout()
self.hbox4 = QHBoxLayout()
self.hbox5 = QHBoxLayout()
self.hbox6 = QHBoxLayout()
self.hbox7 = QHBoxLayout()
'''
for w in [self.fit_cb,self.fit_label, self.fit_seter,self.xlim_seter_left_label,self.xlim_seter_left,self.xlim_seter_right_label,self.xlim_seter_right,self.ylim_seter_down_label,self.ylim_seter_down,self.ylim_seter_up_label,self.ylim_seter_up,self.shape_cb]:
self.hbox0.addWidget(w)
self.hbox0.setAlignment(w, Qt.AlignVCenter)
'''
for w in [self.view]:
self.hbox0.addWidget(w)
self.hbox0.setAlignment(w, Qt.AlignVCenter)
for w in [self.Normalize_cb, self.norm_slider_label, self.norm_slider]:
self.hbox1.addWidget(w)
self.hbox1.setAlignment(w, Qt.AlignVCenter)
for w in [self.logx_cb, self.x_element_label, self.x_element]:
self.hbox2.addWidget(w)
self.hbox2.setAlignment(w, Qt.AlignVCenter)
for w in [self.logy_cb, self.y_element_label, self.y_element]:
self.hbox3.addWidget(w)
self.hbox3.setAlignment(w, Qt.AlignVCenter)
for w in [self.logz_cb, self.z_element_label, self.z_element]:
self.hbox4.addWidget(w)
self.hbox4.setAlignment(w, Qt.AlignVCenter)
self.vbox = QVBoxLayout()
#self.vbox.addWidget(self.view)
self.vbox.addLayout(self.hbox0)
self.vbox.addLayout(self.hbox1)
self.vbox.addLayout(self.hbox2)
self.vbox.addLayout(self.hbox3)
self.vbox.addLayout(self.hbox4)
self.main_frame.setLayout(self.vbox)
self.setCentralWidget(self.main_frame)
def Read(self, inpoints):
points = []
for i in inpoints:
points.append(i.split())
result = []
for i in points:
for l in range(len(i)):
a = float((i[l].split(','))[0])
a = a * self.x_scale
b = float((i[l].split(','))[1])
b = (self.height_load - b) * self.y_scale
result.append((a, b))
return (result)
def Load(self):
fileName, filetype = QFileDialog.getOpenFileName(self,
'Select File',
'~/',
'PNG Files (*.png);;JPG Files (*.jpg);;SVG Files (*.svg)')  # set the file extension filter; note the double-semicolon separators
print(fileName, '\t', filetype)
if ('svg' in fileName):
doc = minidom.parse(fileName) # parseString also exists
polygon_points = [path.getAttribute('points') for path in doc.getElementsByTagName('polygon')]
polyline_points = [path.getAttribute('points') for path in doc.getElementsByTagName('polyline')]
svg_width = [path.getAttribute('width') for path in doc.getElementsByTagName('svg')]
svg_height = [path.getAttribute('height') for path in doc.getElementsByTagName('svg')]
# print(svg_width)
# print(svg_height)
digit = '01234567890.-'
width = svg_width[0].replace('px', '').replace('pt', '')
height = svg_height[0].replace('px', '').replace('pt', '')
self.width_load = float(width)
self.height_load = float(height)
soup = BeautifulSoup(open(fileName), 'lxml')
tmpgon = soup.find_all('polygon')
tmppolyline = soup.find_all('polyline')
tmptext = soup.find_all('text')
tmpline = soup.find_all('line')
tmppath = soup.find_all('path')
self.strgons = []
for i in tmpgon:
a = (str(i)).replace('\n', '').replace('\t', '')
m = BeautifulSoup(a, 'lxml')
k = m.polygon.attrs
self.strgons.append(k['points'].split())
self.strpolylines = []
for i in tmppolyline:
a = (str(i)).replace('\n', '').replace('\t', '')
m = BeautifulSoup(a, 'lxml')
k = m.polyline.attrs
self.strpolylines.append(k['points'].split())
self.strlines = []
for i in tmpline:
a = (str(i)).replace('\n', '').replace('\t', '')
m = BeautifulSoup(a, 'lxml')
k = m.line.attrs
a = str(k['x1']) + ',' + str(k['y1']) + ' ' + str(k['x2']) + ',' + str(k['y2'])
self.strlines.append(a.split())
self.strpath = []
for i in tmppath:
a = (str(i)).replace('\n', '').replace('\t', '')
m = BeautifulSoup(a, 'lxml')
k = m.path.attrs
self.strpath.append(k['d'].split())
# print(self.strpath)
self.polygon = []
for i in self.strgons:
m = self.Read(i)
m.append(m[0])
self.polygon.append(m)
self.polyline = []
for i in self.strpolylines:
m = self.Read(i)
# print('i: ',i,'\n m:',m)
self.polyline.append(m)
self.line = []
for i in self.strlines:
m = self.Read(i)
# print('i: ',i,'\n m:',m)
self.line.append(m)
elif ('png' in fileName or 'jpg' in fileName):
self.img = mpimg.imread(fileName)
self.flag = 1
self.Magic()
def Reset(self):
self.flag = 0
self.Magic()
def FitChanged(self, text):
w = 'Fit' + text
self.fit_label.setText(w)
self.fit_label.adjustSize()
try:
self.FitLevel = float(text)
except:
pass
self.Magic()
def XleftChanged(self,text):
if len(text)<1:
self.LimSet = False
else:
self.LimSet = True
w = 'Left ' + text
self.xlim_seter_left_label.setText(w)
self.xlim_seter_left_label.adjustSize()
try:
self.Xleft = float(text)
except:
pass
self.Magic()
def XrightChanged(self,text):
if len(text)<1:
self.LimSet = False
else:
self.LimSet = True
w = 'Right ' + text
self.xlim_seter_right_label.setText(w)
self.xlim_seter_right_label.adjustSize()
try:
self.Xright = float(text)
except:
pass
self.Magic()
def YdownChanged(self,text):
if len(text)<1:
self.LimSet = False
else:
self.LimSet = True
w = 'Down ' + text
self.ylim_seter_down_label.setText(w)
self.ylim_seter_down_label.adjustSize()
try:
self.Ydown = float(text)
except:
pass
self.Magic()
def YupChanged(self,text):
if len(text)<1:
self.LimSet = False
else:
self.LimSet =True
w = 'Up ' + text
self.ylim_seter_up_label.setText(w)
self.ylim_seter_up_label.adjustSize()
try:
self.Yup = float(text)
except:
pass
self.Magic()
def ShapeChanged(self, text):
w = 'Shape' + text
#self.shape_label.setText(w)
#self.shape_label.adjustSize()
try:
self.ShapeGroups = int(text)
except:
pass
self.Magic()
def GetASequence(self, head=0, tail= 200, count=10):
if count > 0:
result = np.arange(head, tail, (tail - head) / count)
else:
result = np.arange(head, tail, (tail - head) / 10)
return (result)
def Magic(self):
#self.view.setFixedSize(self.width(), self.width())
self.WholeData = []
self.x_scale = self.width_plot / self.width_load
self.y_scale = self.height_plot / self.height_load
self.z_scale = self.depth_plot / self.depth_load
# print(self.x_scale,' and ',self.x_scale)
raw = self._df
a = int(self.x_element.value())
b = int(self.y_element.value())
c = int(self.z_element.value())
self.x_element_label.setText(self.items[a])
self.y_element_label.setText(self.items[b])
self.z_element_label.setText(self.items[c])
if (self.Left != self.Right) and (self.Down != self.Up) and abs(self.Left) + abs(self.Right) + abs(
self.Down) + abs(self.Up) != 0:
self.extent = [self.Left, self.Right, self.Down, self.Up]
elif (self.Left == self.Right and abs(self.Left) + abs(self.Right) != 0):
reply = QMessageBox.warning(self, 'Warning', 'You set same value to Left and Right limits.')
self.extent = 0
elif (self.Down == self.Up and abs(self.Down) + abs(self.Up) != 0):
reply = QMessageBox.warning(self, 'Warning', 'You set same value to Up and Down limits.')
self.extent = 0
else:
self.extent = 0
standardnamechosen = self.StandardsName[int(self.norm_slider.value())]
standardchosen = self.Standards[standardnamechosen]
self.norm_slider_label.setText(standardnamechosen)
PointLabels = []
XtoDraw = []
YtoDraw = []
ZtoDraw = []
Colors=[]
Alphas=[]
Markers=[]
Names=[]
for i in range(len(raw)):
# raw.at[i, 'DataType'] == 'User' or raw.at[i, 'DataType'] == 'user' or raw.at[i, 'DataType'] == 'USER'
TmpLabel = ''
# self.WholeData.append(math.log(tmp, 10))
if (raw.at[i, 'Label'] in PointLabels or raw.at[i, 'Label'] == ''):
TmpLabel = ''
else:
PointLabels.append(raw.at[i, 'Label'])
TmpLabel = raw.at[i, 'Label']
x, y, z = 0, 0, 0
xuse, yuse, zuse = 0, 0, 0
x, y, z = raw.at[i, self.items[a]], raw.at[i, self.items[b]], raw.at[i, self.items[c]]
try:
xuse = x
yuse = y
zuse = z
self.xlabel = self.items[a]
self.ylabel = self.items[b]
self.zlabel = self.items[c]
if (self.Normalize_cb.isChecked()):
self.xlabel = self.items[a] + ' Norm by ' + standardnamechosen
self.x_element_label.setText(self.xlabel)
self.ylabel = self.items[b] + ' Norm by ' + standardnamechosen
self.y_element_label.setText(self.ylabel)
self.zlabel = self.items[c] + ' Norm by ' + standardnamechosen
self.z_element_label.setText(self.zlabel)
if self.items[a] in self.Element:
xuse = xuse / standardchosen[self.items[a]]
if self.items[b] in self.Element:
yuse = yuse / standardchosen[self.items[b]]
if self.items[c] in self.Element:
zuse = zuse / standardchosen[self.items[c]]
if (self.logx_cb.isChecked()):
xuse = math.log(x, 10)
self.xlabel = '$log10$ ' + self.xlabel
if (self.logy_cb.isChecked()):
yuse = math.log(y, 10)
self.ylabel = '$log10$ ' + self.ylabel
if (self.logz_cb.isChecked()):
zuse = math.log(z, 10)
self.zlabel = '$log10$ ' + self.zlabel
XtoDraw.append(xuse)
YtoDraw.append(yuse)
ZtoDraw.append(zuse)
Colors.append(raw.at[i, 'Color'])
Alphas.append(raw.at[i, 'Alpha'])
Names.append(raw.at[i, 'Label'])
Markers.append(raw.at[i, 'Marker'])
except(ValueError):
pass
if self.LimSet==False:
self.Xleft, self.Xright, self.Ydown, self.Yup, self.Ztail, self.Zhead = min(XtoDraw), max(XtoDraw), min(YtoDraw), max(YtoDraw), min(ZtoDraw), max(ZtoDraw)
xmin, xmax = min(XtoDraw), max(XtoDraw)
ymin, ymax = min(YtoDraw), max(YtoDraw)
zmin, zmax = min(ZtoDraw), max(ZtoDraw)
xmean = np.mean(XtoDraw)
ymean = np.mean(YtoDraw)
zmean = np.mean(ZtoDraw)
Xoriginal = np.arange(xmin, xmax, (xmax - xmin) / 10)
Yoriginal = np.arange(ymin, ymax, (ymax - ymin) / 10)
Zoriginal = np.arange(zmin, zmax, (zmax - zmin) / 10)
XonPlot = self.GetASequence(tail=self.ShapeGroups)
YonPlot = self.GetASequence(tail=self.ShapeGroups)
ZonPlot = self.GetASequence(tail=self.ShapeGroups)
XonStick = []
YonStick = []
ZonStick = []
for i in range(len(XonPlot)):
XonStick.append([XonPlot[i], Xoriginal[i]])
YonStick.append([YonPlot[i], Yoriginal[i]])
ZonStick.append([ZonPlot[i], Zoriginal[i]])
pass
#print(XtoDraw,'\n', YtoDraw,'\n', ZtoDraw)
toDf = {self.xlabel:XtoDraw,
self.ylabel:YtoDraw,
self.zlabel:ZtoDraw}
newdf = pd.DataFrame(toDf)
pos = newdf.values  # equivalent to the deprecated .as_matrix()
print(pos)
ThreeDimView = gl.GLScatterPlotItem(pos=pos, color=(100, 255, 255, 88), size=0.1, pxMode=False)
print(xmean,'\n', ymean,'\n', zmean,'\n')
self.view.pan(xmean, ymean, zmean)
xgrid = gl.GLGridItem(size=QtGui.QVector3D(10, 10, 1), color=1)
ygrid = gl.GLGridItem(size=QtGui.QVector3D(20, 20, 2), color=2)
zgrid = gl.GLGridItem(size=QtGui.QVector3D(30, 30, 3), color=3)
## rotate x and y grids to face the correct direction
xgrid.rotate(90, 0, 1, 0)
ygrid.rotate(90, 1, 0, 0)
xgrid.translate(xmean, ymean, zmean)
ygrid.translate(xmean, ymean, zmean)
zgrid.translate(xmean, ymean, zmean)
## scale each grid differently
'''
xgrid.scale(12.8, 12.8, 12.8)
ygrid.scale(12.8, 12.8, 12.8)
zgrid.scale(12.8, 12.8, 12.8)
'''
# xgrid.setTransform(xmean,ymean,zmean)
self.view.addItem(xgrid)
self.view.addItem(ygrid)
self.view.addItem(zgrid)
self.view.addItem(ThreeDimView)
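# A minimal launch sketch, assuming a DataFrame with the column layout expected
# above ('Label', 'Color', 'Alpha', 'Marker' plus numeric element columns);
# the helper name is illustrative only.
def _example_show_glmultidimension(df):
    from PyQt5.QtWidgets import QApplication
    app = QApplication([])
    win = GLMultiDimension(df=df)
    win.show()
    app.exec_()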
|
gpl-3.0
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/IPython/kernel/zmq/kernelapp.py
|
7
|
18674
|
"""An Application for launching a kernel
Authors
-------
* MinRK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Standard library imports
import atexit
import json
import os
import sys
import signal
# System library imports
import zmq
from zmq.eventloop import ioloop
from zmq.eventloop.zmqstream import ZMQStream
# IPython imports
from IPython.core.ultratb import FormattedTB
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases, catch_config_error
)
from IPython.core.profiledir import ProfileDir
from IPython.core.shellapp import (
InteractiveShellApp, shell_flags, shell_aliases
)
from IPython.utils import io
from IPython.utils.localinterfaces import localhost
from IPython.utils.path import filefind
from IPython.utils.py3compat import str_to_bytes
from IPython.utils.traitlets import (
Any, Instance, Dict, Unicode, Integer, Bool, CaselessStrEnum,
DottedObjectName,
)
from IPython.utils.importstring import import_item
from IPython.kernel import write_connection_file
# local imports
from .heartbeat import Heartbeat
from .ipkernel import Kernel
from .parentpoller import ParentPollerUnix, ParentPollerWindows
from .session import (
Session, session_flags, session_aliases, default_secure,
)
from .zmqshell import ZMQInteractiveShell
#-----------------------------------------------------------------------------
# Flags and Aliases
#-----------------------------------------------------------------------------
kernel_aliases = dict(base_aliases)
kernel_aliases.update({
'ip' : 'IPKernelApp.ip',
'hb' : 'IPKernelApp.hb_port',
'shell' : 'IPKernelApp.shell_port',
'iopub' : 'IPKernelApp.iopub_port',
'stdin' : 'IPKernelApp.stdin_port',
'control' : 'IPKernelApp.control_port',
'f' : 'IPKernelApp.connection_file',
'parent': 'IPKernelApp.parent_handle',
'transport': 'IPKernelApp.transport',
})
if sys.platform.startswith('win'):
kernel_aliases['interrupt'] = 'IPKernelApp.interrupt'
kernel_flags = dict(base_flags)
kernel_flags.update({
'no-stdout' : (
{'IPKernelApp' : {'no_stdout' : True}},
"redirect stdout to the null device"),
'no-stderr' : (
{'IPKernelApp' : {'no_stderr' : True}},
"redirect stderr to the null device"),
'pylab' : (
{'IPKernelApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""),
})
# inherit flags&aliases for any IPython shell apps
kernel_aliases.update(shell_aliases)
kernel_flags.update(shell_flags)
# inherit flags&aliases for Sessions
kernel_aliases.update(session_aliases)
kernel_flags.update(session_flags)
_ctrl_c_message = """\
NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.
To exit, you will have to explicitly quit this process, by either sending
"quit" from a client, or using Ctrl-\\ in UNIX-like environments.
To read more about this, see https://github.com/ipython/ipython/issues/2049
"""
#-----------------------------------------------------------------------------
# Application class for starting an IPython Kernel
#-----------------------------------------------------------------------------
class IPKernelApp(BaseIPythonApplication, InteractiveShellApp):
name='ipkernel'
aliases = Dict(kernel_aliases)
flags = Dict(kernel_flags)
classes = [Kernel, ZMQInteractiveShell, ProfileDir, Session]
# the kernel class, as an importstring
kernel_class = DottedObjectName('IPython.kernel.zmq.ipkernel.Kernel', config=True,
help="""The Kernel subclass to be used.
This should allow easy re-use of the IPKernelApp entry point
to configure and launch kernels other than IPython's own.
""")
kernel = Any()
poller = Any() # don't restrict this even though current pollers are all Threads
heartbeat = Instance(Heartbeat)
session = Instance('IPython.kernel.zmq.session.Session')
ports = Dict()
# ipkernel doesn't get its own config file
def _config_file_name_default(self):
return 'ipython_config.py'
# inherit config file name from parent:
parent_appname = Unicode(config=True)
def _parent_appname_changed(self, name, old, new):
if self.config_file_specified:
# it was manually specified, ignore
return
self.config_file_name = new.replace('-','_') + u'_config.py'
# don't let this count as specifying the config file
self.config_file_specified.remove(self.config_file_name)
# connection info:
transport = CaselessStrEnum(['tcp', 'ipc'], default_value='tcp', config=True)
ip = Unicode(config=True,
help="Set the IP or interface on which the kernel will listen.")
def _ip_default(self):
if self.transport == 'ipc':
if self.connection_file:
return os.path.splitext(self.abs_connection_file)[0] + '-ipc'
else:
return 'kernel-ipc'
else:
return localhost()
hb_port = Integer(0, config=True, help="set the heartbeat port [default: random]")
shell_port = Integer(0, config=True, help="set the shell (ROUTER) port [default: random]")
iopub_port = Integer(0, config=True, help="set the iopub (PUB) port [default: random]")
stdin_port = Integer(0, config=True, help="set the stdin (ROUTER) port [default: random]")
control_port = Integer(0, config=True, help="set the control (ROUTER) port [default: random]")
connection_file = Unicode('', config=True,
help="""JSON file in which to store connection info [default: kernel-<pid>.json]
This file will contain the IP, ports, and authentication key needed to connect
clients to this kernel. By default, this file will be created in the security dir
of the current profile, but can be specified by absolute path.
""")
@property
def abs_connection_file(self):
if os.path.basename(self.connection_file) == self.connection_file:
return os.path.join(self.profile_dir.security_dir, self.connection_file)
else:
return self.connection_file
# streams, etc.
no_stdout = Bool(False, config=True, help="redirect stdout to the null device")
no_stderr = Bool(False, config=True, help="redirect stderr to the null device")
outstream_class = DottedObjectName('IPython.kernel.zmq.iostream.OutStream',
config=True, help="The importstring for the OutStream factory")
displayhook_class = DottedObjectName('IPython.kernel.zmq.displayhook.ZMQDisplayHook',
config=True, help="The importstring for the DisplayHook factory")
# polling
parent_handle = Integer(0, config=True,
help="""kill this process if its parent dies. On Windows, the argument
specifies the HANDLE of the parent process, otherwise it is simply boolean.
""")
interrupt = Integer(0, config=True,
help="""ONLY USED ON WINDOWS
Interrupt this process when the parent is signaled.
""")
def init_crash_handler(self):
# Install minimal exception handling
sys.excepthook = FormattedTB(mode='Verbose', color_scheme='NoColor',
ostream=sys.__stdout__)
def init_poller(self):
if sys.platform == 'win32':
if self.interrupt or self.parent_handle:
self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
elif self.parent_handle:
self.poller = ParentPollerUnix()
def _bind_socket(self, s, port):
iface = '%s://%s' % (self.transport, self.ip)
if self.transport == 'tcp':
if port <= 0:
port = s.bind_to_random_port(iface)
else:
s.bind("tcp://%s:%i" % (self.ip, port))
elif self.transport == 'ipc':
if port <= 0:
port = 1
path = "%s-%i" % (self.ip, port)
while os.path.exists(path):
port = port + 1
path = "%s-%i" % (self.ip, port)
else:
path = "%s-%i" % (self.ip, port)
s.bind("ipc://%s" % path)
return port
def load_connection_file(self):
"""load ip/port/hmac config from JSON connection file"""
try:
fname = filefind(self.connection_file, ['.', self.profile_dir.security_dir])
except IOError:
self.log.debug("Connection file not found: %s", self.connection_file)
# This means I own it, so I will clean it up:
atexit.register(self.cleanup_connection_file)
return
self.log.debug(u"Loading connection file %s", fname)
with open(fname) as f:
s = f.read()
cfg = json.loads(s)
self.transport = cfg.get('transport', self.transport)
if self.ip == self._ip_default() and 'ip' in cfg:
# not overridden by config or cl_args
self.ip = cfg['ip']
for channel in ('hb', 'shell', 'iopub', 'stdin', 'control'):
name = channel + '_port'
if getattr(self, name) == 0 and name in cfg:
# not overridden by config or cl_args
setattr(self, name, cfg[name])
if 'key' in cfg:
self.config.Session.key = str_to_bytes(cfg['key'])
def write_connection_file(self):
"""write connection info to JSON file"""
cf = self.abs_connection_file
self.log.debug("Writing connection file: %s", cf)
write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
iopub_port=self.iopub_port, control_port=self.control_port)
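    # For illustration, the file written above is a small JSON document along
    # these lines (every value below is hypothetical):
    #   {"transport": "tcp", "ip": "127.0.0.1",
    #    "shell_port": 53794, "iopub_port": 53795, "stdin_port": 53796,
    #    "control_port": 53797, "hb_port": 53798,
    #    "key": "a0436f6c-1916-498b-8eb9-e81ab9368e84"}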
def cleanup_connection_file(self):
cf = self.abs_connection_file
self.log.debug("Cleaning up connection file: %s", cf)
try:
os.remove(cf)
except (IOError, OSError):
pass
self.cleanup_ipc_files()
def cleanup_ipc_files(self):
"""cleanup ipc files if we wrote them"""
if self.transport != 'ipc':
return
for port in (self.shell_port, self.iopub_port, self.stdin_port, self.hb_port, self.control_port):
ipcfile = "%s-%i" % (self.ip, port)
try:
os.remove(ipcfile)
except (IOError, OSError):
pass
def init_connection_file(self):
if not self.connection_file:
self.connection_file = "kernel-%s.json"%os.getpid()
try:
self.load_connection_file()
except Exception:
self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
self.exit(1)
def init_sockets(self):
# Create a context, a session, and the kernel sockets.
self.log.info("Starting the kernel at pid: %i", os.getpid())
context = zmq.Context.instance()
# Uncomment this to try closing the context.
# atexit.register(context.term)
self.shell_socket = context.socket(zmq.ROUTER)
self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
self.iopub_socket = context.socket(zmq.PUB)
self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
self.stdin_socket = context.socket(zmq.ROUTER)
self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)
self.control_socket = context.socket(zmq.ROUTER)
self.control_port = self._bind_socket(self.control_socket, self.control_port)
self.log.debug("control ROUTER Channel on port: %i" % self.control_port)
def init_heartbeat(self):
"""start the heart beating"""
# heartbeat doesn't share context, because it mustn't be blocked
# by the GIL, which is accessed by libzmq when freeing zero-copy messages
hb_ctx = zmq.Context()
self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
self.hb_port = self.heartbeat.port
self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
self.heartbeat.start()
def log_connection_info(self):
"""display connection info, and store ports"""
basename = os.path.basename(self.connection_file)
if basename == self.connection_file or \
os.path.dirname(self.connection_file) == self.profile_dir.security_dir:
# use shortname
tail = basename
if self.profile != 'default':
tail += " --profile %s" % self.profile
else:
tail = self.connection_file
lines = [
"To connect another client to this kernel, use:",
" --existing %s" % tail,
]
# log connection info
# info-level, so often not shown.
# frontends should use the %connect_info magic
# to see the connection info
for line in lines:
self.log.info(line)
# also raw print to the terminal if no parent_handle (`ipython kernel`)
if not self.parent_handle:
io.rprint(_ctrl_c_message)
for line in lines:
io.rprint(line)
self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
stdin=self.stdin_port, hb=self.hb_port,
control=self.control_port)
def init_session(self):
"""create our session object"""
default_secure(self.config)
self.session = Session(parent=self, username=u'kernel')
def init_blackhole(self):
"""redirects stdout/stderr to devnull if necessary"""
if self.no_stdout or self.no_stderr:
blackhole = open(os.devnull, 'w')
if self.no_stdout:
sys.stdout = sys.__stdout__ = blackhole
if self.no_stderr:
sys.stderr = sys.__stderr__ = blackhole
def init_io(self):
"""Redirect input streams and set a display hook."""
if self.outstream_class:
outstream_factory = import_item(str(self.outstream_class))
sys.stdout = outstream_factory(self.session, self.iopub_socket, u'stdout')
sys.stderr = outstream_factory(self.session, self.iopub_socket, u'stderr')
if self.displayhook_class:
displayhook_factory = import_item(str(self.displayhook_class))
sys.displayhook = displayhook_factory(self.session, self.iopub_socket)
def init_signal(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def init_kernel(self):
"""Create the Kernel object itself"""
shell_stream = ZMQStream(self.shell_socket)
control_stream = ZMQStream(self.control_socket)
kernel_factory = import_item(str(self.kernel_class))
kernel = kernel_factory(parent=self, session=self.session,
shell_streams=[shell_stream, control_stream],
iopub_socket=self.iopub_socket,
stdin_socket=self.stdin_socket,
log=self.log,
profile_dir=self.profile_dir,
user_ns=self.user_ns,
)
kernel.record_ports(self.ports)
self.kernel = kernel
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
# Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
# to ensure that any exception is printed straight to stderr.
# Normally _showtraceback associates the reply with an execution,
# which means frontends will never draw it, as this exception
# is not associated with any execute request.
shell = self.shell
_showtraceback = shell._showtraceback
try:
# replace pyerr-sending traceback with stderr
def print_tb(etype, evalue, stb):
print ("GUI event loop or pylab initialization failed",
file=io.stderr)
print (shell.InteractiveTB.stb2text(stb), file=io.stderr)
shell._showtraceback = print_tb
InteractiveShellApp.init_gui_pylab(self)
finally:
shell._showtraceback = _showtraceback
def init_shell(self):
self.shell = self.kernel.shell
self.shell.configurables.append(self)
@catch_config_error
def initialize(self, argv=None):
super(IPKernelApp, self).initialize(argv)
self.init_blackhole()
self.init_connection_file()
self.init_session()
self.init_poller()
self.init_sockets()
self.init_heartbeat()
# writing/displaying connection info must be *after* init_sockets/heartbeat
self.log_connection_info()
self.write_connection_file()
self.init_io()
self.init_signal()
self.init_kernel()
# shell init steps
self.init_path()
self.init_shell()
self.init_gui_pylab()
self.init_extensions()
self.init_code()
# flush stdout/stderr, so that anything written to these streams during
# initialization do not get associated with the first execution request
sys.stdout.flush()
sys.stderr.flush()
def start(self):
if self.poller is not None:
self.poller.start()
self.kernel.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
pass
launch_new_instance = IPKernelApp.launch_instance
def main():
"""Run an IPKernel as an application"""
app = IPKernelApp.instance()
app.initialize()
app.start()
if __name__ == '__main__':
main()
|
gpl-3.0
|
toobaz/pandas
|
pandas/tests/groupby/aggregate/test_cython.py
|
2
|
6848
|
"""
test cython .agg behavior
"""
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, NaT, Series, Timedelta, Timestamp, bdate_range
from pandas.core.groupby.groupby import DataError
import pandas.util.testing as tm
@pytest.mark.parametrize(
"op_name",
[
"count",
"sum",
"std",
"var",
"sem",
"mean",
pytest.param(
"median",
# ignore mean of empty slice
# and all-NaN
marks=[pytest.mark.filterwarnings("ignore::RuntimeWarning")],
),
"prod",
"min",
"max",
],
)
def test_cythonized_aggers(op_name):
data = {
"A": [0, 0, 0, 0, 1, 1, 1, 1, 1, 1.0, np.nan, np.nan],
"B": ["A", "B"] * 6,
"C": np.random.randn(12),
}
df = DataFrame(data)
df.loc[2:10:2, "C"] = np.nan
op = lambda x: getattr(x, op_name)()
# single column
grouped = df.drop(["B"], axis=1).groupby("A")
exp = {cat: op(group["C"]) for cat, group in grouped}
exp = DataFrame({"C": exp})
exp.index.name = "A"
result = op(grouped)
tm.assert_frame_equal(result, exp)
# multiple columns
grouped = df.groupby(["A", "B"])
expd = {}
for (cat1, cat2), group in grouped:
expd.setdefault(cat1, {})[cat2] = op(group["C"])
exp = DataFrame(expd).T.stack(dropna=False)
exp.index.names = ["A", "B"]
exp.name = "C"
result = op(grouped)["C"]
if op_name in ["sum", "prod"]:
tm.assert_series_equal(result, exp)
def test_cython_agg_boolean():
frame = DataFrame(
{
"a": np.random.randint(0, 5, 50),
"b": np.random.randint(0, 2, 50).astype("bool"),
}
)
result = frame.groupby("a")["b"].mean()
expected = frame.groupby("a")["b"].agg(np.mean)
tm.assert_series_equal(result, expected)
def test_cython_agg_nothing_to_agg():
frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
frame.groupby("a")["b"].mean()
frame = DataFrame({"a": np.random.randint(0, 5, 50), "b": ["foo", "bar"] * 25})
with pytest.raises(DataError, match=msg):
frame[["b"]].groupby(frame["a"]).mean()
def test_cython_agg_nothing_to_agg_with_dates():
frame = DataFrame(
{
"a": np.random.randint(0, 5, 50),
"b": ["foo", "bar"] * 25,
"dates": pd.date_range("now", periods=50, freq="T"),
}
)
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
frame.groupby("b").dates.mean()
def test_cython_agg_frame_columns():
# #2113
df = DataFrame({"x": [1, 2, 3], "y": [3, 4, 5]})
df.groupby(level=0, axis="columns").mean()
df.groupby(level=0, axis="columns").mean()
df.groupby(level=0, axis="columns").mean()
df.groupby(level=0, axis="columns").mean()
def test_cython_agg_return_dict():
# GH 16741
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.randn(8),
"D": np.random.randn(8),
}
)
ts = df.groupby("A")["B"].agg(lambda x: x.value_counts().to_dict())
expected = Series(
[{"two": 1, "one": 1, "three": 1}, {"two": 2, "one": 2, "three": 1}],
index=Index(["bar", "foo"], name="A"),
name="B",
)
tm.assert_series_equal(ts, expected)
def test_cython_fail_agg():
dr = bdate_range("1/1/2000", periods=50)
ts = Series(["A", "B", "C", "D", "E"] * 10, index=dr)
grouped = ts.groupby(lambda x: x.month)
summed = grouped.sum()
expected = grouped.agg(np.sum)
tm.assert_series_equal(summed, expected)
@pytest.mark.parametrize(
"op, targop",
[
("mean", np.mean),
("median", np.median),
("var", np.var),
("add", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
("first", lambda x: x.iloc[0]),
("last", lambda x: x.iloc[-1]),
],
)
def test__cython_agg_general(op, targop):
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
result = df.groupby(labels)._cython_agg_general(op)
expected = df.groupby(labels).agg(targop)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"op, targop",
[
("mean", np.mean),
("median", lambda x: np.median(x) if len(x) > 0 else np.nan),
("var", lambda x: np.var(x, ddof=1)),
("min", np.min),
("max", np.max),
],
)
def test_cython_agg_empty_buckets(op, targop, observed):
df = pd.DataFrame([11, 12, 13])
grps = range(0, 55, 5)
# calling _cython_agg_general directly, instead of via the user API
# which sets different values for min_count, so do that here.
g = df.groupby(pd.cut(df[0], grps), observed=observed)
result = g._cython_agg_general(op)
g = df.groupby(pd.cut(df[0], grps), observed=observed)
expected = g.agg(lambda x: targop(x))
tm.assert_frame_equal(result, expected)
def test_cython_agg_empty_buckets_nanops(observed):
# GH-18869 can't call nanops on empty groups, so hardcode expected
# for these
df = pd.DataFrame([11, 12, 13], columns=["a"])
grps = range(0, 25, 5)
# add / sum
result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
"add"
)
intervals = pd.interval_range(0, 20, freq=5)
expected = pd.DataFrame(
{"a": [0, 0, 36, 0]},
index=pd.CategoricalIndex(intervals, name="a", ordered=True),
)
if observed:
expected = expected[expected.a != 0]
tm.assert_frame_equal(result, expected)
# prod
result = df.groupby(pd.cut(df["a"], grps), observed=observed)._cython_agg_general(
"prod"
)
expected = pd.DataFrame(
{"a": [1, 1, 1716, 1]},
index=pd.CategoricalIndex(intervals, name="a", ordered=True),
)
if observed:
expected = expected[expected.a != 1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("op", ["first", "last", "max", "min"])
@pytest.mark.parametrize(
"data", [Timestamp("2016-10-14 21:00:44.557"), Timedelta("17088 days 21:00:44.557")]
)
def test_cython_with_timestamp_and_nat(op, data):
# https://github.com/pandas-dev/pandas/issues/19526
df = DataFrame({"a": [0, 1], "b": [data, NaT]})
index = Index([0, 1], name="a")
# We will group by a and test the cython aggregations
expected = DataFrame({"b": [data, NaT]}, index=index)
result = df.groupby("a").aggregate(op)
tm.assert_frame_equal(expected, result)
|
bsd-3-clause
|
sinhrks/expandas
|
pandas_ml/core/groupby.py
|
2
|
3678
|
#!/usr/bin/env python
import pandas as pd
import pandas.compat as compat
from pandas_ml.compat import Appender
from pandas_ml.core.base import _BaseEstimator
from pandas_ml.core.frame import ModelFrame
from pandas_ml.core.generic import ModelPredictor, _shared_docs
from pandas_ml.core.series import ModelSeries
@Appender(pd.core.groupby.GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, ModelSeries):
klass = ModelSeriesGroupBy
elif isinstance(obj, ModelFrame):
klass = ModelFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
class ModelSeriesGroupBy(pd.core.groupby.SeriesGroupBy):
pass
class ModelFrameGroupBy(pd.core.groupby.DataFrameGroupBy, ModelPredictor):
_internal_caches = ['_estimator', '_predicted', '_proba', '_log_proba', '_decision']
_internal_names = pd.core.groupby.DataFrameGroupBy._internal_names + _internal_caches
_internal_names_set = set(_internal_names)
@Appender(_shared_docs['estimator_methods'] %
dict(funcname='transform', returned='returned : transformed result'))
def transform(self, func, *args, **kwargs):
if isinstance(func, GroupedEstimator):
return ModelPredictor.transform(self, func, *args, **kwargs)
else:
return pd.core.groupby.DataFrameGroupBy.transform(self, func, *args, **kwargs)
def _get_mapper(self, estimator, method_name):
# mappings are handled by ModelFrame._get_mapper
return None
def _call(self, estimator, method_name, *args, **kwargs):
if method_name in ['fit', 'fit_transform']:
estimator = GroupedEstimator(estimator, self)
if not isinstance(estimator, GroupedEstimator):
raise ValueError('Class {0} is not GroupedEstimator'.format(estimator.__class__.__name__))
results = {}
for name, group in self:
e = estimator.groups[name]
method = getattr(group, method_name)
results[name] = method(e)
self.estimator = estimator
if method_name == 'fit':
return estimator
else:
return results
def _wrap_transform(self, transformed):
return self._wrap_results(transformed)
def _wrap_predicted(self, predicted, estimator):
return self._wrap_results(predicted)
def _wrap_results(self, results):
keys = []
values = []
for key, value in compat.iteritems(results):
keys.extend([key] * len(value))
values.append(value)
results = pd.concat(values, axis=0, ignore_index=False)
if isinstance(results, pd.Series):
results = ModelSeries(results)
# keys must be list
results = results.groupby(by=keys)
elif isinstance(results, pd.DataFrame):
results = ModelFrame(results)
# keys must be Series
results = results.groupby(by=pd.Series(keys))
else:
raise ValueError('Unknown type: {0}'.format(results.__class__.__name__))
return results
class GroupedEstimator(_BaseEstimator):
"""
Create grouped estimators based on passed estimator
"""
def __init__(self, estimator, grouped):
if not isinstance(grouped, pd.core.groupby.DataFrameGroupBy):
raise ValueError("'grouped' must be DataFrameGroupBy instance")
import sklearn.base as base
self.groups = {}
for name, group in grouped:
e = base.clone(estimator)
e = e.set_params(**estimator.get_params())
self.groups[name] = e
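# Hypothetical usage sketch (the frame, column name and estimator below are
# made up; fitting is routed through the ModelPredictor machinery used above):
#   mdf = ModelFrame(data, target='y')
#   grouped = mdf.groupby('group_col')
#   grouped_estimator = grouped.fit(some_sklearn_estimator)
#   # -> a GroupedEstimator holding one cloned estimator per group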
|
bsd-3-clause
|
arthur-gouveia/DAT210x
|
Module5/assignment4.py
|
1
|
10865
|
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
#
# TODO: Parameters to play around with
PLOT_TYPE_TEXT = False # If you'd like to see indices
PLOT_VECTORS = True # If you'd like to see your original features in P.C.-Space
PLOT_HIST_BOXPLOT = True
matplotlib.style.use('ggplot') # Look Pretty
c = ['red', 'green', 'blue', 'orange', 'yellow', 'brown']
def drawVectors(transformed_features, components_, columns, plt):
num_columns = len(columns)
    # This function will project your *original* features (columns)
# onto your principal component feature-space, so that you can
# visualize how "important" each one was in the
# multi-dimensional scaling
# Scale the principal components by the max value in
# the transformed set belonging to that component
xvector = components_[0] * max(transformed_features[:,0])
yvector = components_[1] * max(transformed_features[:,1])
## Visualize projections
# Sort each column by its length. These are your *original*
# columns, not the principal components.
import math
important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
print("Projected Features by importance:\n", important_features)
ax = plt.axes()
for i in range(num_columns):
# Use an arrow to project each original feature as a
# labeled vector on your principal component axes
plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75, zorder=600000)
plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75, zorder=600000)
return ax
def doPCA(data, dimensions=2):
from sklearn.decomposition import PCA
model = PCA(n_components=dimensions, svd_solver='randomized')
model.fit(data)
return model
def doKMeans(data, clusters=0):
#
# TODO: Do the KMeans clustering here, passing in the # of clusters parameter
# and fit it against your data. Then, return a tuple containing the cluster
# centers and the labels
#
model = KMeans(clusters)
model.fit(data)
return model.cluster_centers_, model.labels_
#
# TODO: Load up the dataset. It may or may not have NaNs in it. Make
# sure you catch them and destroy them by setting them to '0'. This is valid
# for this dataset, since if the value is missing, you can assume no $ was spent
# on it.
#
df = pd.read_csv('Datasets/Wholesale customers data.csv')
nulls = df.isnull().sum()
print('Number of missing values')
print(nulls)
if nulls.sum() > 0:
    df = df.fillna(0)
    print('{} missing values were replaced'.format(nulls.sum()))
    print('These are the new numbers of missing values: \n{}'.format(
        df.isnull().sum()))
#
# TODO: As instructed, get rid of the 'Channel' and 'Region' columns, since
# you'll be investigating as if this were a single location wholesaler, rather
# than a national / international one. Leaving these fields in here would cause
# KMeans to examine and give weight to them.
#
df = df.iloc[:, 2:]
#
# TODO: Before unitizing / standardizing / normalizing your data in preparation for
# K-Means, it's a good idea to get a quick peek at it. You can do this using the
# .describe() method, or even by using the built-in pandas df.plot.hist()
#
print(df.describe())
if (PLOT_HIST_BOXPLOT):
df.hist()
plt.suptitle('Histogram of original data')
plt.figure()
sns.boxplot(data=df)
plt.suptitle('Boxplot of original data')
#
# INFO: Having checked out your data, you may have noticed there's a pretty big gap
# between the top customers in each feature category and the rest. Some feature
# scaling algos won't get rid of outliers for you, so it's a good idea to handle that
# manually---particularly if your goal is NOT to determine the top customers. After
# all, you can do that with a simple Pandas .sort_values() and not a machine
# learning clustering algorithm. From a business perspective, you're probably more
# interested in clustering your +/- 2 standard deviation customers, rather than the
# crème de la crème or the bottom-of-the-barrel customers
#
# Remove top 10 and bottom 5 samples for each column:
drop = {}
for col in df.columns:
# Bottom 5
sort = df.sort_values(by=col, ascending=True)
if len(sort) > 5: sort=sort[:5]
for index in sort.index: drop[index] = True # Just store the index once
    # Top 10
sort = df.sort_values(by=col, ascending=False)
if len(sort) > 10: sort=sort[:10]
for index in sort.index: drop[index] = True # Just store the index once
#
# INFO Drop rows by index. We do this all at once in case there is a
# collision. This way, we don't end up dropping more rows than we have
# to, if there is a single row that satisfies the drop for multiple columns.
# Since there are 6 columns and we flag up to (10 + 5) * 6 = 90 candidates, if we
# end up dropping fewer than 90 rows, that means there indeed were collisions.
print("Dropping {0} Outliers...".format(len(drop)))
df.drop(inplace=True, labels=drop.keys(), axis=0)
print(df.describe())
if (PLOT_HIST_BOXPLOT):
df.hist()
plt.suptitle('After outlier removal')
plt.figure()
sns.boxplot(data=df)
plt.suptitle('After outlier removal')
#
# INFO: What are you interested in?
#
# Depending on what you're interested in, you might take a different approach
# to normalizing/standardizing your data.
#
# You should note that all columns left in the dataset are of the same unit.
# You might ask yourself, do I even need to normalize / standardize the data?
# The answer depends on what you're trying to accomplish. For instance, although
# all the units are the same (generic money unit), the price per item in your
# store isn't. There may be some cheap items and some expensive ones. If your goal
# is to find out what items people tend to buy together but you didn't
# unitize properly before running kMeans, the contribution of the lesser priced
# item would be dwarfed by the more expensive item.
#
# For a great overview on a few of the normalization methods supported in SKLearn,
# please check out: https://stackoverflow.com/questions/30918781/right-function-for-normalizing-input-of-sklearn-svm
#
# Suffice to say, at the end of the day, you're going to have to know what question
# you want answered and what data you have available in order to select the best
# method for your purpose. Luckily, SKLearn's interfaces are easy to switch out
# so in the mean time, you can experiment with all of them and see how they alter
# your results.
#
#
# 5-sec summary before you dive deeper online:
#
# NORMALIZATION: Let's say your users spend a LOT. Normalization divides each item by
# the average overall amount of spending. Stated differently, your
# new feature is = the contribution of overall spending going into
# that particular item: $spent on feature / $overall spent by sample
#
# MINMAX: What % in the overall range of $spent by all users on THIS particular
# feature is the current sample's feature at? When you're dealing with
# all the same units, this will produce a near face-value amount. Be
# careful though: if you have even a single outlier, it can cause all
# your data to get squashed up in lower percentages.
# Imagine your buyers usually spend $100 on wholesale milk, but today
# only spent $20. This is the relationship you're trying to capture
# with MinMax. NOTE: MinMax doesn't standardize (std. dev.); it only
# normalizes / unitizes your feature, in the mathematical sense.
# MinMax can be used as an alternative to zero mean, unit variance scaling.
# [(sampleFeatureValue-min) / (max-min)] * (max-min) + min
# Where min and max are for the overall feature values for all samples.
#
# TODO: Un-comment just ***ONE*** of these lines at a time and see how it alters your results
# Pay attention to the direction of the arrows, as well as their LENGTHS
#T = preprocessing.StandardScaler().fit_transform(df)
#T = preprocessing.MinMaxScaler().fit_transform(df)
#T = preprocessing.MaxAbsScaler().fit_transform(df)
#T = preprocessing.Normalizer().fit_transform(df)
#T = df # No Change
scalers = {'Standard': preprocessing.StandardScaler,
'MinMax': preprocessing.MinMaxScaler,
'MaxAbs': preprocessing.MaxAbsScaler,
'Normalizer': preprocessing.Normalizer}
actual_scaler = 'Normalizer'
T = scalers[actual_scaler]().fit_transform(df)
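# Quick illustrative aside (not part of the assignment): how two of the scalers
# above treat the same toy column; the numbers here are made up.
_toy = np.array([[1.0], [10.0], [100.0]])
print('MinMax-scaled toy column:', preprocessing.MinMaxScaler().fit_transform(_toy).ravel())
print('Standard-scaled toy column:', preprocessing.StandardScaler().fit_transform(_toy).ravel())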
#
# INFO: Sometimes people perform PCA before doing KMeans, so that KMeans only
# operates on the most meaningful features. In our case, there are so few features
# that doing PCA ahead of time isn't really necessary, and you can do KMeans in
# feature space. But keep in mind you have the option to transform your data to
# bring down its dimensionality. If you take that route, then your Clusters will
# already be in PCA-transformed feature space, and you won't have to project them
# again for visualization.
# Do KMeans
n_clusters = 3
centroids, labels = doKMeans(T, n_clusters)
#
# TODO: print out your centroids. They're currently in feature-space, which
# is good. print them out before you transform them into PCA space for viewing
#
print('Centroids values:\n{}'.format(centroids))
# Do PCA *after* to visualize the results. Project the centroids as well as
# the samples into the new 2D feature space for visualization purposes.
display_pca = doPCA(T)
T = display_pca.transform(T)
CC = display_pca.transform(centroids)
# Visualize all the samples. Give them the color of their cluster label
fig = plt.figure()
ax = fig.add_subplot(111)
if PLOT_TYPE_TEXT:
    # Plot the index of the sample, so you can further investigate it in your dataset
for i in range(len(T)): ax.text(T[i,0], T[i,1], df.index[i], color=c[labels[i]], alpha=0.75, zorder=600000)
ax.set_xlim(min(T[:,0])*1.2, max(T[:,0])*1.2)
ax.set_ylim(min(T[:,1])*1.2, max(T[:,1])*1.2)
else:
# Plot a regular scatter plot
sample_colors = [ c[labels[i]] for i in range(len(T)) ]
ax.scatter(T[:, 0], T[:, 1], c=sample_colors, marker='o', alpha=0.2)
# Plot the Centroids as X's, and label them
ax.scatter(CC[:, 0], CC[:, 1], marker='x', s=169, linewidths=3, zorder=1000, c=c)
for i in range(len(centroids)): ax.text(CC[i, 0], CC[i, 1], str(i), zorder=500010, fontsize=18, color=c[i])
# Display feature vectors for investigation:
if PLOT_VECTORS: drawVectors(T, display_pca.components_, df.columns, plt)
# Add the cluster label back into the dataframe and display it:
df['label'] = pd.Series(labels, index=df.index)
print(df)
plt.suptitle(actual_scaler)
plt.show()
|
mit
|
demisjohn/EMpy
|
setup.py
|
3
|
1287
|
__author__ = 'Lorenzo Bolla'
from setuptools import setup, find_packages
from EMpy import __version__
with open('README.rst', 'r') as readme:
long_description = readme.read()
setup(
name='ElectroMagneticPython',
version=__version__,
author='Lorenzo Bolla',
author_email='[email protected]',
description='EMpy - ElectroMagnetic Python',
long_description=long_description,
url='http://lbolla.github.io/EMpy/',
download_url='https://github.com/lbolla/EMpy',
license='BSD',
platforms=['Windows', 'Linux', 'Mac OS-X'],
packages=find_packages(),
install_requires=[
'distribute>=0.6.28',
'future<1.0dev',
'numpy<2.0dev',
'scipy<1.0dev',
'matplotlib<2.0dev',
],
provides=['EMpy'],
test_suite='tests',
tests_require=[
'nose<2.0dev',
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering :: Physics',
]
)
|
mit
|
CloudVLab/professional-services
|
examples/e2e-home-appliance-status-monitoring/ml/trainer/task.py
|
2
|
9191
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import io
import json
import os
import pandas as pd
import numpy as np
import sklearn.metrics
import trainer.model as model
import tensorflow as tf
from google.cloud import storage
SELECT_COLUMN = [19, ] + list(range(21, 29))
def test(hparams, estimator):
"""Run trained estimator on the test set.
    Run the trained estimator on the test set for debugging purposes.
    Args:
      hparams: hyper-parameters
estimator: trained tf estimator
"""
test_input = lambda: model.make_input_fn(
data_file=hparams.test_file,
seq_len=hparams.seq_len,
batch_size=hparams.eval_batch_size,
cols=SELECT_COLUMN,
num_epochs=1)
# load test data
if hparams.test_file.startswith("gs"):
bucket_name = hparams.test_file.split('/')[2]
file_path = '/'.join(hparams.test_file.split('/')[3:])
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob_context = bucket.blob(file_path)
test_data = pd.read_csv(io.BytesIO(blob_context.download_as_string()),
index_col=0,
encoding='utf-8')
else:
test_data = pd.read_csv(hparams.test_file, index_col=0)
tf.logging.info('test_data.shape={}'.format(test_data.shape))
# make predictions
predictions = estimator.predict(input_fn=test_input)
preds = []
for pred_dict in predictions:
preds.append(pred_dict['probabilities'])
preds = np.array(preds)
tf.logging.info('preds.shape={}'.format(preds.shape))
tf.logging.info('preds.max()={}'.format(preds.max()))
# output metrics
groundtruth = test_data.iloc[hparams.seq_len - 1:]
pred_names = [x.replace('_on', '_pred')
for x in groundtruth.columns if '_on' in x]
preds = preds.round().astype(np.uint8)
preds = pd.DataFrame(preds, columns=pred_names, index=groundtruth.index)
df = pd.merge(groundtruth, preds, left_index=True, right_index=True)
appliances_names = [x.replace('_pred', '') for x in pred_names]
for i, app in enumerate(appliances_names):
precision = sklearn.metrics.precision_score(
df[app + '_on'], df[app + '_pred'])
recall = sklearn.metrics.recall_score(
df[app + '_on'], df[app + '_pred'])
tf.logging.info('{0}:\tprecision={1:.2f}, recall={2:.2f}'.format(
app, precision, recall))
def run_experiment(hparams):
"""Run the training and evaluate using the high level API
Args:
hparams: dict, dictionary of hyper-parameters related to the running experiment.
"""
select_cols = SELECT_COLUMN
feat_col_names = ['ActivePower_{}'.format(i + 1)
for i in range(hparams.seq_len)]
# Construct input function for training, evaluation and testing
# Note: Don't filter on the evaluation and test data
train_input = lambda: model.make_input_fn(
data_file=hparams.train_file,
seq_len=hparams.seq_len,
batch_size=hparams.train_batch_size,
cols=select_cols,
train_flag=True,
num_epochs=hparams.num_epochs,
filter_prob=hparams.filter_prob)
eval_input = lambda: model.make_input_fn(
data_file=hparams.eval_file,
seq_len=hparams.seq_len,
batch_size=hparams.eval_batch_size,
cols=select_cols,
num_epochs=1)
model_dir = os.path.join(
hparams.job_dir,
json.loads(os.environ.get('TF_CONFIG', '{}'))
.get('task', {}).get('trial', '')
)
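    # For reference, during CMLE hyperparameter tuning the TF_CONFIG environment
    # variable carries the trial id consumed above; a hypothetical fragment:
    #   TF_CONFIG='{"task": {"type": "worker", "index": 0, "trial": "3"}}'
    # which would make model_dir equal to <job_dir>/3.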
tf.logging.info('model dir {}'.format(model_dir))
# Experiment running configuration
# Checkpoint is configured to be saved every ten minutes
run_config = tf.estimator.RunConfig(save_checkpoints_steps=2500)
run_config = run_config.replace(model_dir=model_dir)
params = {'feat_cols': feat_col_names,
'seq_len': hparams.seq_len,
'lstm_size': hparams.lstm_size,
'batch_size': hparams.train_batch_size,
'num_appliances': len(select_cols) - 1,
'num_layers': hparams.num_layers,
'learning_rate': hparams.learning_rate,
'dropout_rate': hparams.dropout_rate,
'use_keras': hparams.keras}
estimator = tf.estimator.Estimator(model_fn=model.model_fn,
model_dir=model_dir,
config=run_config,
params=params)
# Set training spec
early_stopping = tf.contrib.estimator.stop_if_no_increase_hook(
estimator,
metric_name='f_measure',
max_steps_without_increase=2000,
min_steps=100,
run_every_secs=300)
train_spec = tf.estimator.TrainSpec(input_fn=train_input,
max_steps=hparams.train_steps,
hooks=[early_stopping])
# Set serving function, exporter and evaluation spec
# The serving function is only applicable for JSON format input
serving_function = model.json_serving_input_fn(feat_names=feat_col_names)
exporter = tf.estimator.FinalExporter(name=hparams.model_name,
serving_input_receiver_fn=serving_function)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input,
steps=None,
throttle_secs=120,
exporters=[exporter],
name='energy-disaggregation-eval')
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
# test on test data, just for CMLE online debugging's purpose
if hparams.test:
test(hparams, estimator)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Input Arguments
parser.add_argument(
'--train-file',
help='GCS or local paths to training data'
)
parser.add_argument(
'--num-epochs',
help="""
Maximum number of training data epochs on which to train.
      If both --train-steps and --num-epochs are specified,
      the training job will run for --train-steps or --num-epochs,
      whichever occurs first. If unspecified, it will run for --train-steps.
""",
type=int,
default=40
)
parser.add_argument(
'--train-batch-size',
help='Batch size for training steps',
type=int,
default=64
)
parser.add_argument(
'--eval-file',
help='GCS or local paths to evaluation data',
)
parser.add_argument(
'--eval-batch-size',
help='Batch size for evaluation steps',
type=int,
default=64
)
parser.add_argument(
'--test-file',
help='GCS or local paths to test data',
)
# Training arguments
parser.add_argument(
'--seq-len',
help='Length of cropped sequence',
default=5,
type=int
)
parser.add_argument(
'--lstm-size',
help='Size of lstm',
default=131,
type=int
)
parser.add_argument(
'--num-layers',
help='Number of layers in the model',
default=4,
type=int
)
parser.add_argument(
'--dropout-rate',
help='The rate of drop out',
default=0.3204,
type=float
)
parser.add_argument(
'--learning-rate',
help='Learning rate',
default=8.1729e-5,
type=float
)
parser.add_argument(
'--filter-prob',
help='Filter probability',
default=0.6827,
type=float
)
parser.add_argument(
'--job-dir',
help='GCS location to write checkpoints and export models',
default='/tmp/estimator'
)
parser.add_argument(
'--model-name',
help='name of the model',
default='estimator'
)
# Argument to turn on all logging
parser.add_argument(
'--verbosity',
choices=[
'DEBUG',
'ERROR',
'FATAL',
'INFO',
'WARN'
],
default='INFO',
)
# Experiment arguments
parser.add_argument(
'--train-steps',
help="""\
Steps to run the training job for. If --num-epochs is not specified,
this must be. Otherwise the training job will run indefinitely.\
""",
default=1e5,
type=int
)
parser.add_argument(
'--eval-steps',
        help='Number of steps to run evaluation for at each checkpoint',
default=1e3,
type=int
)
parser.add_argument(
'--test',
help='Whether to test a model',
default=False,
type=bool
)
parser.add_argument(
'--keras',
        help='Whether to use Keras model authoring',
action='store_true'
)
args, _ = parser.parse_known_args()
# Set python level verbosity
tf.logging.set_verbosity(args.verbosity)
# Set C++ Graph Execution level verbosity
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(
        tf.logging.__dict__[args.verbosity] // 10)
config = args.__dict__
for k, v in config.items():
tf.logging.info('{}: {}'.format(k, v))
# Run the training job
run_experiment(args)
|
apache-2.0
|
peterk87/sistr_cmd
|
sistr/src/cgmlst/extras/hclust_cutree.py
|
1
|
2176
|
from fastcluster import linkage
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import fcluster
import pandas as pd
import numpy as np
def profiles_to_np_array(profiles_csv_path):
"""
"""
df = pd.read_csv(profiles_csv_path, index_col=0)
arr = np.array(df, dtype=np.float64)
genomes = df.index
markers = df.columns
return arr, genomes, markers
def nr_profiles(arr, genomes):
"""
    Collapse redundant cgMLST profiles so that each distinct profile is represented
    only once in the returned profile matrix.
    Args:
        arr (numpy.array): cgMLST allelic profile matrix with one row per genome
        genomes (list): Genome_ names corresponding to the rows of `arr`
    Returns:
        (numpy.array, list): tuple of the non-redundant profile matrix and the list of grouped Genomes_
"""
gs_collapse = []
genome_idx_dict = {}
indices = []
patt_dict = {}
for i, g in enumerate(genomes):
p = arr[i, :].tostring()
if p in patt_dict:
parent = patt_dict[p]
idx = genome_idx_dict[parent]
gs_collapse[idx].append(g)
else:
indices.append(i)
patt_dict[p] = g
genome_idx_dict[g] = len(gs_collapse)
gs_collapse.append([g])
return arr[indices, :], gs_collapse
def dist_matrix_hamming(arr):
return pdist(arr, metric='hamming')
def complete_linkage(dm):
"""
Perform complete linkage hierarchical clustering on a distance matrix.
Args:
dm (numpy.array): Distance matrix
Returns:
(object): fastcluster complete linkage hierarchical clustering object
"""
return linkage(dm, 'complete')
def cutree(Z, thresholds):
out = {}
for t in thresholds:
out[t] = fcluster(Z, t, criterion='distance')
return pd.DataFrame(out)
def expand_clusters_dataframe(df_clusters, genome_groups):
lens_genome_groups = [len(xs) for xs in genome_groups]
idxs = np.repeat(df_clusters.index.values, lens_genome_groups)
df_cl_exp = df_clusters.reindex(idxs, method='ffill')
df_cl_exp.index = [g for gs in genome_groups for g in gs]
return df_cl_exp
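# Hypothetical end-to-end sketch tying the helpers above together (the CSV path
# and thresholds are made up):
#   arr, genomes, markers = profiles_to_np_array('cgmlst_profiles.csv')
#   nr_arr, genome_groups = nr_profiles(arr, genomes)
#   Z = complete_linkage(dist_matrix_hamming(nr_arr))
#   df_clusters = cutree(Z, thresholds=[0.05, 0.1, 0.25])
#   df_expanded = expand_clusters_dataframe(df_clusters, genome_groups)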
|
apache-2.0
|
jakirkham/mpld3
|
examples/random_walk.py
|
19
|
2303
|
"""
Visualizing Random Walks
========================
This shows the use of transparent lines to visualize random walk data.
There is also a custom plugin defined which causes lines to be highlighted
when the mouse hovers over them.
Use the toolbar buttons at the bottom-right of the plot to enable zooming
and panning, and to reset the view.
"""
import jinja2
import json
import numpy as np
import matplotlib.pyplot as plt
import mpld3
from mpld3 import plugins, utils
class HighlightLines(plugins.PluginBase):
"""A plugin to highlight lines on hover"""
JAVASCRIPT = """
mpld3.register_plugin("linehighlight", LineHighlightPlugin);
LineHighlightPlugin.prototype = Object.create(mpld3.Plugin.prototype);
LineHighlightPlugin.prototype.constructor = LineHighlightPlugin;
LineHighlightPlugin.prototype.requiredProps = ["line_ids"];
LineHighlightPlugin.prototype.defaultProps = {alpha_bg:0.3, alpha_fg:1.0}
function LineHighlightPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
LineHighlightPlugin.prototype.draw = function(){
for(var i=0; i<this.props.line_ids.length; i++){
var obj = mpld3.get_element(this.props.line_ids[i], this.fig),
alpha_fg = this.props.alpha_fg;
alpha_bg = this.props.alpha_bg;
obj.elements()
.on("mouseover", function(d, i){
d3.select(this).transition().duration(50)
.style("stroke-opacity", alpha_fg); })
.on("mouseout", function(d, i){
d3.select(this).transition().duration(200)
.style("stroke-opacity", alpha_bg); });
}
};
"""
def __init__(self, lines):
self.lines = lines
self.dict_ = {"type": "linehighlight",
"line_ids": [utils.get_id(line) for line in lines],
"alpha_bg": lines[0].get_alpha(),
"alpha_fg": 1.0}
N_paths = 50
N_steps = 100
x = np.linspace(0, 10, 100)
y = 0.1 * (np.random.random((N_paths, N_steps)) - 0.5)
y = y.cumsum(1)
fig, ax = plt.subplots(subplot_kw={'xticks': [], 'yticks': []})
lines = ax.plot(x, y.T, color='blue', lw=4, alpha=0.1)
plugins.connect(fig, HighlightLines(lines))
mpld3.show()
|
bsd-3-clause
|
jrkerns/pylinac
|
pylinac/dlg.py
|
1
|
4907
|
from math import floor, ceil
from typing import Sequence
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import stats
from pylinac import Normalization
from pylinac.core import image
from pylinac.core.profile import SingleProfile, find_peaks
from pylinac.picketfence import MLCArrangement, MLC
class DLG:
"""Analyze a machine's dosimetric leaf gap by looking at profiles with various amounts of overlap. This is NOT the
    same procedure as the sweeping gap test provided by Varian, although the determined value should be similar."""
def __init__(self, path):
self.image = image.LinacDicomImage(path)
self.measured_dlg: float = -np.inf
self.measured_dlg_per_leaf: list = []
self.planned_dlg_per_leaf: list = []
self._lin_fit = None
def analyze(self, gaps: Sequence, mlc: MLC, y_field_size: float = 100, profile_width=10):
"""Analyze an EPID image with varying MLC overlaps to determine the DLG.
Parameters
----------
gaps
The gaps (i.e. overlap) of the leaves in mm.
These should typically be in descending order and also be negative. E.g. (-1, ..., -2.2).
mlc
The MLC type/arrangement. This lets us know where the leaf centers are to take a profile along.
y_field_size
            The field size along the y-dimension (perpendicular to the leaf travel). This determines which leaves
are associated with which gap.
profile_width
The width of the profile to take along the axes parallel to leaf motion. This should be a good bit wider
than the gap values. The default is reasonable and it is unlikely it needs tweaking.
"""
measured_dlg_per_leaf = []
planned_dlg_per_leaf = []
mlc = mlc.value['arrangement']
g = list(gaps)
g.sort()
profile_width_px = round(self.image.dpmm * profile_width)
mid_width = self.image.shape[1] / 2
mid_height = self.image.shape[0] / 2
for idx, center in enumerate(mlc.centers):
if -y_field_size / 2 < center < y_field_size / 2:
# get the pixel window area
center_px = center * self.image.dpmm
width_px = mlc.widths[idx] / 4 * self.image.dpmm
top = ceil(mid_height + center_px + width_px)
bottom = floor(mid_height + center_px - width_px)
# sample the window and take the average perpendicular to MLC motion
window = self.image[bottom:top, int(mid_width - profile_width_px):int(mid_width + profile_width_px)]
width = self._determine_measured_gap(window.mean(axis=0))
planned_dlg_per_leaf.append(self._get_dlg_offset(y_field_size, center, g))
measured_dlg_per_leaf.append(width)
# fit the data to a line and determine the DLG from the 0 intercept
lin_fit = stats.linregress(planned_dlg_per_leaf, measured_dlg_per_leaf)
dlg = lin_fit.intercept / lin_fit.slope
self._lin_fit = lin_fit
self.measured_dlg = dlg
self.planned_dlg_per_leaf = planned_dlg_per_leaf
self.measured_dlg_per_leaf = measured_dlg_per_leaf
def plot_dlg(self, show=True):
"""Plot the measured DLG values across the planned gaps"""
if not self.measured_dlg_per_leaf:
raise ValueError("Analyze the image before plotting with .analyze()")
plt.plot(self.planned_dlg_per_leaf, self.measured_dlg_per_leaf, 'gx')
plt.plot(self.planned_dlg_per_leaf,
self._lin_fit.intercept + self._lin_fit.slope * np.array(self.planned_dlg_per_leaf), 'r',
label='fitted line')
plt.title(f"Measured DLG: {self.measured_dlg:2.3f}mm")
plt.grid()
if show:
plt.show()
@staticmethod
def _get_dlg_offset(field_size, leaf_center, dlgs: Sequence) -> float:
"""Return the planned leaf overlap for a given leaf"""
roi_size = field_size / len(dlgs)
y_bounds = [field_size / 2 - idx * roi_size for idx in range(len(dlgs) + 1)]
for idx, gap in enumerate(dlgs):
upper_bound = y_bounds[idx]
lower_bound = y_bounds[idx + 1]
if lower_bound < leaf_center < upper_bound:
return gap
@staticmethod
def _determine_measured_gap(profile: np.ndarray) -> float:
"""Return the measured gap based on profile height"""
mid_value = profile[int(len(profile) / 2)]
prof = SingleProfile(profile, normalization_method=Normalization.NONE)
if mid_value < profile.mean():
prof.invert()
_, props = find_peaks(prof.values, max_number=1)
if mid_value < profile.mean():
return -props['prominences'][0]
else:
return props['prominences'][0]
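# Hypothetical usage sketch (the DICOM path and gap values are made up, and the
# MLC member is assumed to exist in pylinac's MLC enum):
#   dlg = DLG('dlg_epid.dcm')
#   dlg.analyze(gaps=(-0.4, -0.6, -0.8, -1.0, -1.2), mlc=MLC.MILLENNIUM)
#   print('Measured DLG: {:.3f} mm'.format(dlg.measured_dlg))
#   dlg.plot_dlg()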
|
mit
|
caot/sas7bdat
|
sas7bdat.py
|
1
|
64205
|
#!/usr/bin/env python
"""
This module will read sas7bdat files using pure Python (2.7+, 3+).
No SAS software required!
"""
from __future__ import division, absolute_import, print_function,\
unicode_literals
import atexit
import csv
import logging
import math
import os
import platform
import struct
import sys
from datetime import datetime, timedelta
import six
xrange = six.moves.range
__all__ = ['SAS7BDAT']
def _debug(t, v, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
sys.__excepthook__(t, v, tb)
else:
import pdb
import traceback
traceback.print_exception(t, v, tb)
print()
pdb.pm()
os._exit(1)
def _get_color_emit(prefix, fn):
# This doesn't work on Windows since Windows doesn't support
# the ansi escape characters
def _new(handler):
levelno = handler.levelno
if levelno >= logging.CRITICAL:
color = '\x1b[31m' # red
elif levelno >= logging.ERROR:
color = '\x1b[31m' # red
elif levelno >= logging.WARNING:
color = '\x1b[33m' # yellow
elif levelno >= logging.INFO:
color = '\x1b[32m' # green or normal
elif levelno >= logging.DEBUG:
color = '\x1b[35m' # pink
else:
color = '\x1b[0m' # normal
handler.msg = '%s[%s] %s%s' % (color, prefix, handler.msg, '\x1b[0m')
return fn(handler)
return _new
class ParseError(Exception):
pass
class Decompressor(object):
def __init__(self, parent):
self.parent = parent
def decompress_row(self, offset, length, result_length, page):
raise NotImplementedError
@staticmethod
def to_ord(int_or_str):
if isinstance(int_or_str, int):
return int_or_str
return ord(int_or_str)
@staticmethod
def to_chr(int_or_str):
py2 = six.PY2
if isinstance(int_or_str, (bytes, bytearray)):
return int_or_str
if py2:
return chr(int_or_str)
return bytes([int_or_str])
class RLEDecompressor(Decompressor):
"""
Decompresses data using the Run Length Encoding algorithm
"""
def decompress_row(self, offset, length, result_length, page):
b = self.to_ord
c = self.to_chr
current_result_array_index = 0
result = []
i = 0
for j in xrange(length):
if i != j:
continue
control_byte = b(page[offset + i]) & 0xF0
end_of_first_byte = b(page[offset + i]) & 0x0F
if control_byte == 0x00:
if i != (length - 1):
count_of_bytes_to_copy = (
(b(page[offset + i + 1]) & 0xFF) +
64 +
end_of_first_byte * 256
)
start = offset + i + 2
end = start + count_of_bytes_to_copy
result.append(c(page[start:end]))
i += count_of_bytes_to_copy + 1
current_result_array_index += count_of_bytes_to_copy
elif control_byte == 0x40:
copy_counter = (
end_of_first_byte * 16 +
(b(page[offset + i + 1]) & 0xFF)
)
for _ in xrange(copy_counter + 18):
result.append(c(page[offset + i + 2]))
current_result_array_index += 1
i += 2
elif control_byte == 0x60:
for _ in xrange(end_of_first_byte * 256 +
(b(page[offset + i + 1]) & 0xFF) + 17):
result.append(c(0x20))
current_result_array_index += 1
i += 1
elif control_byte == 0x70:
for _ in xrange((b(page[offset + i + 1]) & 0xFF) + 17):
result.append(c(0x00))
current_result_array_index += 1
i += 1
elif control_byte == 0x80:
count_of_bytes_to_copy = min(end_of_first_byte + 1,
length - (i + 1))
start = offset + i + 1
end = start + count_of_bytes_to_copy
result.append(c(page[start:end]))
i += count_of_bytes_to_copy
current_result_array_index += count_of_bytes_to_copy
elif control_byte == 0x90:
count_of_bytes_to_copy = min(end_of_first_byte + 17,
length - (i + 1))
start = offset + i + 1
end = start + count_of_bytes_to_copy
result.append(c(page[start:end]))
i += count_of_bytes_to_copy
current_result_array_index += count_of_bytes_to_copy
elif control_byte == 0xA0:
count_of_bytes_to_copy = min(end_of_first_byte + 33,
length - (i + 1))
start = offset + i + 1
end = start + count_of_bytes_to_copy
result.append(c(page[start:end]))
i += count_of_bytes_to_copy
current_result_array_index += count_of_bytes_to_copy
elif control_byte == 0xB0:
count_of_bytes_to_copy = min(end_of_first_byte + 49,
length - (i + 1))
start = offset + i + 1
end = start + count_of_bytes_to_copy
result.append(c(page[start:end]))
i += count_of_bytes_to_copy
current_result_array_index += count_of_bytes_to_copy
elif control_byte == 0xC0:
for _ in xrange(end_of_first_byte + 3):
result.append(c(page[offset + i + 1]))
current_result_array_index += 1
i += 1
elif control_byte == 0xD0:
for _ in xrange(end_of_first_byte + 2):
result.append(c(0x40))
current_result_array_index += 1
elif control_byte == 0xE0:
for _ in xrange(end_of_first_byte + 2):
result.append(c(0x20))
current_result_array_index += 1
elif control_byte == 0xF0:
for _ in xrange(end_of_first_byte + 2):
result.append(c(0x00))
current_result_array_index += 1
else:
self.parent.logger.error('unknown control byte: %s',
control_byte)
i += 1
return b''.join(result)
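# Worked illustration of the control bytes handled above (values are
# hypothetical): 0x82 takes the 0x80 branch and copies end_of_first_byte + 1 = 3
# literal bytes from the page, while 0xD1 takes the 0xD0 branch and emits
# end_of_first_byte + 2 = 3 copies of the byte 0x40.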
class RDCDecompressor(Decompressor):
"""
Decompresses data using the Ross Data Compression algorithm
"""
def bytes_to_bits(self, src, offset, length):
result = [0] * (length * 8)
for i in xrange(length):
b = src[offset + i]
for bit in xrange(8):
result[8 * i + (7 - bit)] = 0 if ((b & (1 << bit)) == 0) else 1
return result
def ensure_capacity(self, src, capacity):
if capacity >= len(src):
new_len = max(capacity, 2 * len(src))
src.extend([0] * (new_len - len(src)))
return src
def is_short_rle(self, first_byte_of_cb):
return first_byte_of_cb in set([0x00, 0x01, 0x02, 0x03, 0x04, 0x05])
def is_single_byte_marker(self, first_byte_of_cb):
return first_byte_of_cb in set([0x02, 0x04, 0x06, 0x08, 0x0A])
def is_two_bytes_marker(self, double_bytes_cb):
return len(double_bytes_cb) == 2 and\
((double_bytes_cb[0] >> 4) & 0xF) > 2
def is_three_bytes_marker(self, three_byte_marker):
flag = three_byte_marker[0] >> 4
return len(three_byte_marker) == 3 and (flag & 0xF) in set([1, 2])
def get_length_of_rle_pattern(self, first_byte_of_cb):
if first_byte_of_cb <= 0x05:
return first_byte_of_cb + 3
return 0
def get_length_of_one_byte_pattern(self, first_byte_of_cb):
return first_byte_of_cb + 14\
if self.is_single_byte_marker(first_byte_of_cb) else 0
def get_length_of_two_bytes_pattern(self, double_bytes_cb):
return (double_bytes_cb[0] >> 4) & 0xF
def get_length_of_three_bytes_pattern(self, p_type, three_byte_marker):
if p_type == 1:
return 19 + (three_byte_marker[0] & 0xF) +\
(three_byte_marker[1] * 16)
elif p_type == 2:
return three_byte_marker[2] + 16
return 0
def get_offset_for_one_byte_pattern(self, first_byte_of_cb):
if first_byte_of_cb == 0x08:
return 24
elif first_byte_of_cb == 0x0A:
return 40
return 0
def get_offset_for_two_bytes_pattern(self, double_bytes_cb):
return 3 + (double_bytes_cb[0] & 0xF) + (double_bytes_cb[1] * 16)
def get_offset_for_three_bytes_pattern(self, triple_bytes_cb):
return 3 + (triple_bytes_cb[0] & 0xF) + (triple_bytes_cb[1] * 16)
def clone_byte(self, b, length):
return [b] * length
def decompress_row(self, offset, length, result_length, page):
b = self.to_ord
c = self.to_chr
src_row = [b(x) for x in page[offset:offset + length]]
out_row = [0] * result_length
src_offset = 0
out_offset = 0
while src_offset < (len(src_row) - 2):
prefix_bits = self.bytes_to_bits(src_row, src_offset, 2)
src_offset += 2
for bit_index in xrange(16):
if src_offset >= len(src_row):
break
if prefix_bits[bit_index] == 0:
out_row = self.ensure_capacity(out_row, out_offset)
out_row[out_offset] = src_row[src_offset]
src_offset += 1
out_offset += 1
continue
marker_byte = src_row[src_offset]
try:
next_byte = src_row[src_offset + 1]
except IndexError:
break
if self.is_short_rle(marker_byte):
length = self.get_length_of_rle_pattern(marker_byte)
out_row = self.ensure_capacity(
out_row, out_offset + length
)
pattern = self.clone_byte(next_byte, length)
out_row[out_offset:out_offset + length] = pattern
out_offset += length
src_offset += 2
continue
elif self.is_single_byte_marker(marker_byte) and not\
((next_byte & 0xF0) == ((next_byte << 4) & 0xF0)):
length = self.get_length_of_one_byte_pattern(marker_byte)
out_row = self.ensure_capacity(
out_row, out_offset + length
)
back_offset = self.get_offset_for_one_byte_pattern(
marker_byte
)
start = out_offset - back_offset
end = start + length
out_row[out_offset:out_offset + length] =\
out_row[start:end]
src_offset += 1
out_offset += length
continue
two_bytes_marker = src_row[src_offset:src_offset + 2]
if self.is_two_bytes_marker(two_bytes_marker):
length = self.get_length_of_two_bytes_pattern(
two_bytes_marker
)
out_row = self.ensure_capacity(
out_row, out_offset + length
)
back_offset = self.get_offset_for_two_bytes_pattern(
two_bytes_marker
)
start = out_offset - back_offset
end = start + length
out_row[out_offset:out_offset + length] =\
out_row[start:end]
src_offset += 2
out_offset += length
continue
three_bytes_marker = src_row[src_offset:src_offset + 3]
if self.is_three_bytes_marker(three_bytes_marker):
p_type = (three_bytes_marker[0] >> 4) & 0x0F
back_offset = 0
if p_type == 2:
back_offset = self.get_offset_for_three_bytes_pattern(
three_bytes_marker
)
length = self.get_length_of_three_bytes_pattern(
p_type, three_bytes_marker
)
out_row = self.ensure_capacity(
out_row, out_offset + length
)
if p_type == 1:
pattern = self.clone_byte(
three_bytes_marker[2], length
)
else:
start = out_offset - back_offset
end = start + length
pattern = out_row[start:end]
out_row[out_offset:out_offset + length] = pattern
src_offset += 3
out_offset += length
continue
else:
self.parent.logger.error(
'unknown marker %s at offset %s', src_row[src_offset],
src_offset
)
break
return b''.join([c(x) for x in out_row])
class SAS7BDAT(object):
"""
SAS7BDAT(path[, log_level[, extra_time_format_strings[, \
extra_date_time_format_strings[, extra_date_format_strings]]]]) -> \
SAS7BDAT object
Open a SAS7BDAT file. The log level is a standard logging level
(defaults to logging.INFO).
If your sas7bdat file uses non-standard format strings for time, datetime,
or date values, pass those strings into the constructor using the
appropriate kwarg.
"""
_open_files = []
RLE_COMPRESSION = b'SASYZCRL'
RDC_COMPRESSION = b'SASYZCR2'
COMPRESSION_LITERALS = set([
RLE_COMPRESSION, RDC_COMPRESSION
])
DECOMPRESSORS = {
RLE_COMPRESSION: RLEDecompressor,
RDC_COMPRESSION: RDCDecompressor
}
TIME_FORMAT_STRINGS = set([
'TIME'
])
DATE_TIME_FORMAT_STRINGS = set([
'DATETIME'
])
DATE_FORMAT_STRINGS = set([
'YYMMDD', 'MMDDYY', 'DDMMYY', 'DATE', 'JULIAN', 'MONYY'
])
def __init__(self, path, log_level=logging.INFO,
extra_time_format_strings=None,
extra_date_time_format_strings=None,
extra_date_format_strings=None,
skip_header=False,
encoding='utf8',
encoding_errors='ignore',
align_correction=True):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
if log_level == logging.DEBUG:
sys.excepthook = _debug
self.path = path
self.endianess = None
self.u64 = False
self.logger = self._make_logger(level=log_level)
self._update_format_strings(
self.TIME_FORMAT_STRINGS, extra_time_format_strings
)
self._update_format_strings(
self.DATE_TIME_FORMAT_STRINGS, extra_date_time_format_strings
)
self._update_format_strings(
self.DATE_FORMAT_STRINGS, extra_date_format_strings
)
self.skip_header = skip_header
self.encoding = encoding
self.encoding_errors = encoding_errors
self.align_correction = align_correction
self._file = open(self.path, 'rb')
self._open_files.append(self._file)
self.cached_page = None
self.current_page_type = None
self.current_page_block_count = None
self.current_page_subheaders_count = None
self.current_file_position = 0
self.current_page_data_subheader_pointers = []
self.current_row = []
self.column_names_strings = []
self.column_names = []
self.column_types = []
self.column_data_offsets = []
self.column_data_lengths = []
self.columns = []
self.header = SASHeader(self)
self.properties = self.header.properties
self.header.parse_metadata()
self.logger.debug('\n%s', str(self.header))
self._iter = self.readlines()
def __repr__(self):
"""
x.__repr__() <==> repr(x)
"""
return 'SAS7BDAT file: %s' % os.path.basename(self.path)
def __enter__(self):
"""
__enter__() -> self.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
__exit__(*excinfo) -> None. Closes the file.
"""
self.close()
def __iter__(self):
"""
x.__iter__() <==> iter(x)
"""
return self.readlines()
def _update_format_strings(self, var, format_strings):
if format_strings is not None:
if isinstance(format_strings, str):
var.add(format_strings)
elif isinstance(format_strings, (set, list, tuple)):
var.update(set(format_strings))
else:
raise NotImplementedError
def close(self):
"""
close() -> None or (perhaps) an integer. Close the file.
A closed file cannot be used for further I/O operations.
close() may be called more than once without error.
Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.
"""
return self._file.close()
def _make_logger(self, level=logging.INFO):
"""
Create a custom logger with the specified properties.
"""
logger = logging.getLogger(self.path)
logger.setLevel(level)
fmt = '%(message)s'
stream_handler = logging.StreamHandler()
if platform.system() != 'Windows':
stream_handler.emit = _get_color_emit(
os.path.basename(self.path),
stream_handler.emit
)
else:
fmt = '[%s] %%(message)s' % os.path.basename(self.path)
formatter = logging.Formatter(fmt, '%y-%m-%d %H:%M:%S')
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
def _read_bytes(self, offsets_to_lengths):
result = {}
if not self.cached_page:
for offset, length in six.iteritems(offsets_to_lengths):
skipped = 0
while skipped < (offset - self.current_file_position):
seek = offset - self.current_file_position - skipped
skipped += seek
self._file.seek(seek, 0)
tmp = self._file.read(length)
if len(tmp) < length:
self.logger.error(
'failed to read %s bytes from sas7bdat file', length
)
self.current_file_position = offset + length
result[offset] = tmp
else:
for offset, length in six.iteritems(offsets_to_lengths):
result[offset] = self.cached_page[offset:offset + length]
return result
def _read_val(self, fmt, raw_bytes, size):
if fmt == 'i' and self.u64 and size == 8:
fmt = 'q'
newfmt = fmt
if fmt == 's':
newfmt = '%ds' % min(size, len(raw_bytes))
elif fmt in set(['number', 'datetime', 'date', 'time']):
newfmt = 'd'
if len(raw_bytes) != size:
size = len(raw_bytes)
if size < 8:
if self.endianess == 'little':
raw_bytes = b''.join([b'\x00' * (8 - size), raw_bytes])
else:
raw_bytes += b'\x00' * (8 - size)
size = 8
if self.endianess == 'big':
newfmt = '>%s' % newfmt
else:
newfmt = '<%s' % newfmt
val = struct.unpack(str(newfmt), raw_bytes[:size])[0]
if fmt == 's':
val = val.strip(b'\x00').strip()
elif math.isnan(val):
val = ''
elif fmt == 'datetime':
val = datetime(1960, 1, 1) + timedelta(seconds=val)
elif fmt == 'time':
val = (datetime(1960, 1, 1) + timedelta(seconds=val)).time()
elif fmt == 'date':
val = (datetime(1960, 1, 1) + timedelta(days=val)).date()
elif fmt in set(['number']):
i = int(val)
if i == val:
val = i
return val
def readlines(self):
"""
readlines() -> generator which yields lists of values, each a line
from the file.
Possible values in the list are None, string, float, datetime.datetime,
datetime.date, and datetime.time.
"""
bit_offset = self.header.PAGE_BIT_OFFSET
subheader_pointer_length = self.header.SUBHEADER_POINTER_LENGTH
row_count = self.header.properties.row_count
current_row_in_file_index = 0
current_row_on_page_index = 0
if not self.skip_header:
yield [x.name.decode(self.encoding, self.encoding_errors)
for x in self.columns]
if not self.cached_page:
self._file.seek(self.properties.header_length)
self._read_next_page()
while current_row_in_file_index < row_count:
current_row_in_file_index += 1
current_page_type = self.current_page_type
if current_page_type == self.header.PAGE_META_TYPE:
try:
current_subheader_pointer =\
self.current_page_data_subheader_pointers[
current_row_on_page_index
]
except IndexError:
self._read_next_page()
current_row_on_page_index = 0
else:
current_row_on_page_index += 1
cls = self.header.SUBHEADER_INDEX_TO_CLASS.get(
self.header.DATA_SUBHEADER_INDEX
)
if cls is None:
raise NotImplementedError
cls(self).process_subheader(
current_subheader_pointer.offset,
current_subheader_pointer.length
)
if current_row_on_page_index ==\
len(self.current_page_data_subheader_pointers):
self._read_next_page()
current_row_on_page_index = 0
elif current_page_type in self.header.PAGE_MIX_TYPE:
if self.align_correction:
align_correction = (
bit_offset + self.header.SUBHEADER_POINTERS_OFFSET +
self.current_page_subheaders_count *
subheader_pointer_length
) % 8
else:
align_correction = 0
offset = (
bit_offset + self.header.SUBHEADER_POINTERS_OFFSET +
align_correction + self.current_page_subheaders_count *
subheader_pointer_length + current_row_on_page_index *
self.properties.row_length
)
try:
self.current_row = self._process_byte_array_with_data(
offset,
self.properties.row_length
)
except:
self.logger.exception(
'failed to process data (you might want to try '
'passing align_correction=%s to the SAS7BDAT '
'constructor)' % (not self.align_correction)
)
raise
current_row_on_page_index += 1
if current_row_on_page_index == min(
self.properties.row_count,
self.properties.mix_page_row_count
):
self._read_next_page()
current_row_on_page_index = 0
elif current_page_type == self.header.PAGE_DATA_TYPE:
self.current_row = self._process_byte_array_with_data(
bit_offset + self.header.SUBHEADER_POINTERS_OFFSET +
current_row_on_page_index *
self.properties.row_length,
self.properties.row_length
)
current_row_on_page_index += 1
if current_row_on_page_index == self.current_page_block_count:
self._read_next_page()
current_row_on_page_index = 0
else:
self.logger.error('unknown page type: %s', current_page_type)
yield self.current_row
def _read_next_page(self):
self.current_page_data_subheader_pointers = []
self.cached_page = self._file.read(self.properties.page_length)
if len(self.cached_page) <= 0:
return
if len(self.cached_page) != self.properties.page_length:
self.logger.error(
'failed to read complete page from file (read %s of %s bytes)',
len(self.cached_page), self.properties.page_length
)
self.header.read_page_header()
if self.current_page_type == self.header.PAGE_META_TYPE:
self.header.process_page_metadata()
if self.current_page_type not in [
self.header.PAGE_META_TYPE,
self.header.PAGE_DATA_TYPE
] + self.header.PAGE_MIX_TYPE:
self._read_next_page()
def _process_byte_array_with_data(self, offset, length):
row_elements = []
if self.properties.compression and length < self.properties.row_length:
decompressor = self.DECOMPRESSORS.get(
self.properties.compression
)
source = decompressor(self).decompress_row(
offset, length, self.properties.row_length,
self.cached_page
)
offset = 0
else:
source = self.cached_page
for i in xrange(self.properties.column_count):
length = self.column_data_lengths[i]
if length == 0:
break
start = offset + self.column_data_offsets[i]
end = offset + self.column_data_offsets[i] + length
temp = source[start:end]
if self.columns[i].type == 'number':
if self.column_data_lengths[i] <= 2:
row_elements.append(self._read_val(
'h', temp, length
))
else:
fmt = self.columns[i].format
if not fmt:
row_elements.append(self._read_val(
'number', temp, length
))
elif fmt in self.TIME_FORMAT_STRINGS:
row_elements.append(self._read_val(
'time', temp, length
))
elif fmt in self.DATE_TIME_FORMAT_STRINGS:
row_elements.append(self._read_val(
'datetime', temp, length
))
elif fmt in self.DATE_FORMAT_STRINGS:
row_elements.append(self._read_val(
'date', temp, length
))
else:
row_elements.append(self._read_val(
'number', temp, length
))
else: # string
row_elements.append(self._read_val(
's', temp, length
).decode(self.encoding, self.encoding_errors))
return row_elements
def convert_file(self, out_file, delimiter=',', step_size=100000):
"""
convert_file(out_file[, delimiter[, step_size]]) -> None
A convenience method to convert a SAS7BDAT file into a delimited
text file. Defaults to comma-separated. The step_size parameter
is used to show progress on longer-running conversions.
"""
delimiter = str(delimiter)
self.logger.debug('saving as: %s', out_file)
out_f = None
success = True
try:
if out_file == '-':
out_f = sys.stdout
else:
out_f = open(out_file, 'w')
out = csv.writer(out_f, lineterminator='\n', delimiter=delimiter)
i = 0
for i, line in enumerate(self, 1):
if len(line) != (self.properties.column_count or 0):
msg = 'parsed line into %s columns but was ' \
'expecting %s.\n%s' %\
(len(line), self.properties.column_count, line)
self.logger.error(msg)
success = False
if self.logger.level == logging.DEBUG:
raise ParseError(msg)
break
if not i % step_size:
self.logger.info(
'%.1f%% complete',
float(i) / self.properties.row_count * 100.0
)
try:
out.writerow(line)
except IOError:
self.logger.warn('wrote %s lines before interruption', i)
break
self.logger.info(u'\u27f6 [%s] wrote %s of %s lines',
os.path.basename(out_file), i - 1,
self.properties.row_count or 0)
finally:
if out_f is not None:
out_f.close()
return success
def to_data_frame(self):
"""
to_data_frame() -> pandas.DataFrame object
A convenience method to convert a SAS7BDAT file into a pandas
DataFrame.
"""
import pandas as pd
data = list(self.readlines())
return pd.DataFrame([dict(list(zip(data[0], x))) for x in data[1:]])
class Column(object):
def __init__(self, col_id, name, label, col_format, col_type, length):
self.col_id = col_id
self.name = name
self.label = label
self.format = col_format.decode("utf-8")
self.type = col_type
self.length = length
def __repr__(self):
return self.name
class SubheaderPointer(object):
def __init__(self, offset=None, length=None, compression=None,
p_type=None):
self.offset = offset
self.length = length
self.compression = compression
self.type = p_type
class ProcessingSubheader(object):
TEXT_BLOCK_SIZE_LENGTH = 2
ROW_LENGTH_OFFSET_MULTIPLIER = 5
ROW_COUNT_OFFSET_MULTIPLIER = 6
COL_COUNT_P1_MULTIPLIER = 9
COL_COUNT_P2_MULTIPLIER = 10
ROW_COUNT_ON_MIX_PAGE_OFFSET_MULTIPLIER = 15 # rowcountfp
COLUMN_NAME_POINTER_LENGTH = 8
COLUMN_NAME_TEXT_SUBHEADER_OFFSET = 0
COLUMN_NAME_TEXT_SUBHEADER_LENGTH = 2
COLUMN_NAME_OFFSET_OFFSET = 2
COLUMN_NAME_OFFSET_LENGTH = 2
COLUMN_NAME_LENGTH_OFFSET = 4
COLUMN_NAME_LENGTH_LENGTH = 2
COLUMN_DATA_OFFSET_OFFSET = 8
COLUMN_DATA_LENGTH_OFFSET = 8
COLUMN_DATA_LENGTH_LENGTH = 4
COLUMN_TYPE_OFFSET = 14
COLUMN_TYPE_LENGTH = 1
COLUMN_FORMAT_TEXT_SUBHEADER_INDEX_OFFSET = 22
COLUMN_FORMAT_TEXT_SUBHEADER_INDEX_LENGTH = 2
COLUMN_FORMAT_OFFSET_OFFSET = 24
COLUMN_FORMAT_OFFSET_LENGTH = 2
COLUMN_FORMAT_LENGTH_OFFSET = 26
COLUMN_FORMAT_LENGTH_LENGTH = 2
COLUMN_LABEL_TEXT_SUBHEADER_INDEX_OFFSET = 28
COLUMN_LABEL_TEXT_SUBHEADER_INDEX_LENGTH = 2
COLUMN_LABEL_OFFSET_OFFSET = 30
COLUMN_LABEL_OFFSET_LENGTH = 2
COLUMN_LABEL_LENGTH_OFFSET = 32
COLUMN_LABEL_LENGTH_LENGTH = 2
def __init__(self, parent):
self.parent = parent
self.logger = parent.logger
self.properties = parent.header.properties
self.int_length = 8 if self.properties.u64 else 4
def process_subheader(self, offset, length):
raise NotImplementedError
class RowSizeSubheader(ProcessingSubheader):
def process_subheader(self, offset, length):
int_len = self.int_length
lcs = offset + (682 if self.properties.u64 else 354)
lcp = offset + (706 if self.properties.u64 else 378)
vals = self.parent._read_bytes({
offset + self.ROW_LENGTH_OFFSET_MULTIPLIER * int_len: int_len,
offset + self.ROW_COUNT_OFFSET_MULTIPLIER * int_len: int_len,
offset + self.ROW_COUNT_ON_MIX_PAGE_OFFSET_MULTIPLIER * int_len:
int_len,
offset + self.COL_COUNT_P1_MULTIPLIER * int_len: int_len,
offset + self.COL_COUNT_P2_MULTIPLIER * int_len: int_len,
lcs: 2,
lcp: 2,
})
if self.properties.row_length is not None:
self.logger.error('found more than one row length subheader')
if self.properties.row_count is not None:
self.logger.error('found more than one row count subheader')
if self.properties.col_count_p1 is not None:
self.logger.error('found more than one col count p1 subheader')
if self.properties.col_count_p2 is not None:
self.logger.error('found more than one col count p2 subheader')
if self.properties.mix_page_row_count is not None:
self.logger.error('found more than one mix page row count '
'subheader')
self.properties.row_length = self.parent._read_val(
'i',
vals[offset + self.ROW_LENGTH_OFFSET_MULTIPLIER * int_len],
int_len
)
self.properties.row_count = self.parent._read_val(
'i',
vals[offset + self.ROW_COUNT_OFFSET_MULTIPLIER * int_len],
int_len
)
self.properties.col_count_p1 = self.parent._read_val(
'i',
vals[offset + self.COL_COUNT_P1_MULTIPLIER * int_len],
int_len
)
self.properties.col_count_p2 = self.parent._read_val(
'i',
vals[offset + self.COL_COUNT_P2_MULTIPLIER * int_len],
int_len
)
self.properties.mix_page_row_count = self.parent._read_val(
'i',
vals[offset + self.ROW_COUNT_ON_MIX_PAGE_OFFSET_MULTIPLIER *
int_len],
int_len
)
self.properties.lcs = self.parent._read_val('h', vals[lcs], 2)
self.properties.lcp = self.parent._read_val('h', vals[lcp], 2)
class ColumnSizeSubheader(ProcessingSubheader):
def process_subheader(self, offset, length):
offset += self.int_length
vals = self.parent._read_bytes({
offset: self.int_length
})
if self.properties.column_count is not None:
self.logger.error('found more than one column count subheader')
self.properties.column_count = self.parent._read_val(
'i', vals[offset], self.int_length
)
if self.properties.col_count_p1 + self.properties.col_count_p2 !=\
self.properties.column_count:
self.logger.warning('column count mismatch')
class SubheaderCountsSubheader(ProcessingSubheader):
def process_subheader(self, offset, length):
pass # Not sure what to do here yet
class ColumnTextSubheader(ProcessingSubheader):
def process_subheader(self, offset, length):
offset += self.int_length
vals = self.parent._read_bytes({
offset: self.TEXT_BLOCK_SIZE_LENGTH
})
text_block_size = self.parent._read_val(
'h', vals[offset], self.TEXT_BLOCK_SIZE_LENGTH
)
vals = self.parent._read_bytes({
offset: text_block_size
})
self.parent.column_names_strings.append(vals[offset])
if len(self.parent.column_names_strings) == 1:
column_name = self.parent.column_names_strings[0]
compression_literal = None
for cl in SAS7BDAT.COMPRESSION_LITERALS:
if cl in column_name:
compression_literal = cl
break
self.properties.compression = compression_literal
offset -= self.int_length
vals = self.parent._read_bytes({
offset + (20 if self.properties.u64 else 16): 8
})
compression_literal = self.parent._read_val(
's',
vals[offset + (20 if self.properties.u64 else 16)],
8
).strip()
if compression_literal == '':
self.properties.lcs = 0
vals = self.parent._read_bytes({
offset + 16 + (20 if self.properties.u64 else 16):
self.properties.lcp
})
creatorproc = self.parent._read_val(
's',
vals[offset + 16 + (20 if self.properties.u64 else 16)],
self.properties.lcp
)
self.properties.creator_proc = creatorproc
elif compression_literal == SAS7BDAT.RLE_COMPRESSION:
vals = self.parent._read_bytes({
offset + 24 + (20 if self.properties.u64 else 16):
self.properties.lcp
})
creatorproc = self.parent._read_val(
's',
vals[offset + 24 + (20 if self.properties.u64 else 16)],
self.properties.lcp
)
self.properties.creator_proc = creatorproc
elif self.properties.lcs > 0:
self.properties.lcp = 0
vals = self.parent._read_bytes({
offset + (20 if self.properties.u64 else 16):
self.properties.lcs
})
creator = self.parent._read_val(
's',
vals[offset + (20 if self.properties.u64 else 16)],
self.properties.lcs
)
self.properties.creator = creator
class ColumnNameSubheader(ProcessingSubheader):
def process_subheader(self, offset, length):
offset += self.int_length
column_name_pointers_count = (length - 2 * self.int_length - 12) // 8
for i in xrange(column_name_pointers_count):
text_subheader = (
offset + self.COLUMN_NAME_POINTER_LENGTH * (i + 1) +
self.COLUMN_NAME_TEXT_SUBHEADER_OFFSET
)
col_name_offset = (
offset + self.COLUMN_NAME_POINTER_LENGTH * (i + 1) +
self.COLUMN_NAME_OFFSET_OFFSET
)
col_name_length = (
offset + self.COLUMN_NAME_POINTER_LENGTH * (i + 1) +
self.COLUMN_NAME_LENGTH_OFFSET
)
vals = self.parent._read_bytes({
text_subheader: self.COLUMN_NAME_TEXT_SUBHEADER_LENGTH,
col_name_offset: self.COLUMN_NAME_OFFSET_LENGTH,
col_name_length: self.COLUMN_NAME_LENGTH_LENGTH,
})
idx = self.parent._read_val(
'h', vals[text_subheader],
self.COLUMN_NAME_TEXT_SUBHEADER_LENGTH
)
col_offset = self.parent._read_val(
'h', vals[col_name_offset],
self.COLUMN_NAME_OFFSET_LENGTH
)
col_len = self.parent._read_val(
'h', vals[col_name_length],
self.COLUMN_NAME_LENGTH_LENGTH
)
name_str = self.parent.column_names_strings[idx]
self.parent.column_names.append(
name_str[col_offset:col_offset + col_len]
)
class ColumnAttributesSubheader(ProcessingSubheader):
def process_subheader(self, offset, length):
int_len = self.int_length
column_attributes_vectors_count = (
(length - 2 * int_len - 12) // (int_len + 8)
)
for i in xrange(column_attributes_vectors_count):
col_data_offset = (
offset + int_len + self.COLUMN_DATA_OFFSET_OFFSET + i *
(int_len + 8)
)
col_data_len = (
offset + 2 * int_len + self.COLUMN_DATA_LENGTH_OFFSET + i *
(int_len + 8)
)
col_types = (
offset + 2 * int_len + self.COLUMN_TYPE_OFFSET + i *
(int_len + 8)
)
vals = self.parent._read_bytes({
col_data_offset: int_len,
col_data_len: self.COLUMN_DATA_LENGTH_LENGTH,
col_types: self.COLUMN_TYPE_LENGTH,
})
self.parent.column_data_offsets.append(self.parent._read_val(
'i', vals[col_data_offset], int_len
))
self.parent.column_data_lengths.append(self.parent._read_val(
'i', vals[col_data_len], self.COLUMN_DATA_LENGTH_LENGTH
))
ctype = self.parent._read_val(
'b', vals[col_types], self.COLUMN_TYPE_LENGTH
)
self.parent.column_types.append(
'number' if ctype == 1 else 'string'
)
class FormatAndLabelSubheader(ProcessingSubheader):
def process_subheader(self, offset, length):
int_len = self.int_length
text_subheader_format = (
offset + self.COLUMN_FORMAT_TEXT_SUBHEADER_INDEX_OFFSET + 3 *
int_len
)
col_format_offset = (
offset + self.COLUMN_FORMAT_OFFSET_OFFSET + 3 * int_len
)
col_format_len = (
offset + self.COLUMN_FORMAT_LENGTH_OFFSET + 3 * int_len
)
text_subheader_label = (
offset + self.COLUMN_LABEL_TEXT_SUBHEADER_INDEX_OFFSET + 3 *
int_len
)
col_label_offset = (
offset + self.COLUMN_LABEL_OFFSET_OFFSET + 3 * int_len
)
col_label_len = (
offset + self.COLUMN_LABEL_LENGTH_OFFSET + 3 * int_len
)
vals = self.parent._read_bytes({
text_subheader_format:
self.COLUMN_FORMAT_TEXT_SUBHEADER_INDEX_LENGTH,
col_format_offset: self.COLUMN_FORMAT_OFFSET_LENGTH,
col_format_len: self.COLUMN_FORMAT_LENGTH_LENGTH,
text_subheader_label:
self.COLUMN_LABEL_TEXT_SUBHEADER_INDEX_LENGTH,
col_label_offset: self.COLUMN_LABEL_OFFSET_LENGTH,
col_label_len: self.COLUMN_LABEL_LENGTH_LENGTH,
})
# min used to guard against incorrect data that appears in some files
format_idx = min(
self.parent._read_val(
'h', vals[text_subheader_format],
self.COLUMN_FORMAT_TEXT_SUBHEADER_INDEX_LENGTH
),
len(self.parent.column_names_strings) - 1
)
format_start = self.parent._read_val(
'h', vals[col_format_offset],
self.COLUMN_FORMAT_OFFSET_LENGTH
)
format_len = self.parent._read_val(
'h', vals[col_format_len],
self.COLUMN_FORMAT_LENGTH_LENGTH
)
# min used to guard against incorrect data that appears in some files
label_idx = min(
self.parent._read_val(
'h', vals[text_subheader_label],
self.COLUMN_LABEL_TEXT_SUBHEADER_INDEX_LENGTH,
),
len(self.parent.column_names_strings) - 1
)
label_start = self.parent._read_val(
'h', vals[col_label_offset],
self.COLUMN_LABEL_OFFSET_LENGTH
)
label_len = self.parent._read_val(
'h', vals[col_label_len],
self.COLUMN_LABEL_LENGTH_LENGTH
)
label_names = self.parent.column_names_strings[label_idx]
column_label = label_names[label_start:label_start + label_len]
format_names = self.parent.column_names_strings[format_idx]
column_format = format_names[format_start:format_start + format_len]
current_column_number = len(self.parent.columns)
self.parent.columns.append(
Column(current_column_number,
self.parent.column_names[current_column_number],
column_label,
column_format,
self.parent.column_types[current_column_number],
self.parent.column_data_lengths[current_column_number])
)
class ColumnListSubheader(ProcessingSubheader):
def process_subheader(self, offset, length):
pass # Not sure what to do with this yet
class DataSubheader(ProcessingSubheader):
def process_subheader(self, offset, length):
self.parent.current_row = self.parent._process_byte_array_with_data(
offset, length
)
class SASProperties(object):
def __init__(self):
self.u64 = False
self.endianess = None
self.platform = None
self.name = None
self.file_type = None
self.date_created = None
self.date_modified = None
self.header_length = None
self.page_length = None
self.page_count = None
self.sas_release = None
self.server_type = None
self.os_type = None
self.os_name = None
self.compression = None
self.row_length = None
self.row_count = None
self.col_count_p1 = None
self.col_count_p2 = None
self.mix_page_row_count = None
self.lcs = None
self.lcp = None
self.creator = None
self.creator_proc = None
self.column_count = None
self.filename = None
class SASHeader(object):
MAGIC = b'\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x00\x00\x00\xc2\xea\x81\x60' \
b'\xb3\x14\x11\xcf\xbd\x92\x08\x00' \
b'\x09\xc7\x31\x8c\x18\x1f\x10\x11'
ROW_SIZE_SUBHEADER_INDEX = 'row_size'
COLUMN_SIZE_SUBHEADER_INDEX = 'column_size'
SUBHEADER_COUNTS_SUBHEADER_INDEX = 'subheader_counts'
COLUMN_TEXT_SUBHEADER_INDEX = 'column_text'
COLUMN_NAME_SUBHEADER_INDEX = 'column_name'
COLUMN_ATTRIBUTES_SUBHEADER_INDEX = 'column_attributes'
FORMAT_AND_LABEL_SUBHEADER_INDEX = 'format_and_label'
COLUMN_LIST_SUBHEADER_INDEX = 'column_list'
DATA_SUBHEADER_INDEX = 'data'
# Subheader signatures, 32 and 64 bit, little and big endian
SUBHEADER_SIGNATURE_TO_INDEX = {
b'\xF7\xF7\xF7\xF7': ROW_SIZE_SUBHEADER_INDEX,
b'\x00\x00\x00\x00\xF7\xF7\xF7\xF7': ROW_SIZE_SUBHEADER_INDEX,
b'\xF7\xF7\xF7\xF7\x00\x00\x00\x00': ROW_SIZE_SUBHEADER_INDEX,
b'\xF6\xF6\xF6\xF6': COLUMN_SIZE_SUBHEADER_INDEX,
b'\x00\x00\x00\x00\xF6\xF6\xF6\xF6': COLUMN_SIZE_SUBHEADER_INDEX,
b'\xF6\xF6\xF6\xF6\x00\x00\x00\x00': COLUMN_SIZE_SUBHEADER_INDEX,
b'\x00\xFC\xFF\xFF': SUBHEADER_COUNTS_SUBHEADER_INDEX,
b'\xFF\xFF\xFC\x00': SUBHEADER_COUNTS_SUBHEADER_INDEX,
b'\x00\xFC\xFF\xFF\xFF\xFF\xFF\xFF': SUBHEADER_COUNTS_SUBHEADER_INDEX,
b'\xFF\xFF\xFF\xFF\xFF\xFF\xFC\x00': SUBHEADER_COUNTS_SUBHEADER_INDEX,
b'\xFD\xFF\xFF\xFF': COLUMN_TEXT_SUBHEADER_INDEX,
b'\xFF\xFF\xFF\xFD': COLUMN_TEXT_SUBHEADER_INDEX,
b'\xFD\xFF\xFF\xFF\xFF\xFF\xFF\xFF': COLUMN_TEXT_SUBHEADER_INDEX,
b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD': COLUMN_TEXT_SUBHEADER_INDEX,
b'\xFF\xFF\xFF\xFF': COLUMN_NAME_SUBHEADER_INDEX,
b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF': COLUMN_NAME_SUBHEADER_INDEX,
b'\xFC\xFF\xFF\xFF': COLUMN_ATTRIBUTES_SUBHEADER_INDEX,
b'\xFF\xFF\xFF\xFC': COLUMN_ATTRIBUTES_SUBHEADER_INDEX,
b'\xFC\xFF\xFF\xFF\xFF\xFF\xFF\xFF': COLUMN_ATTRIBUTES_SUBHEADER_INDEX,
b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFC': COLUMN_ATTRIBUTES_SUBHEADER_INDEX,
b'\xFE\xFB\xFF\xFF': FORMAT_AND_LABEL_SUBHEADER_INDEX,
b'\xFF\xFF\xFB\xFE': FORMAT_AND_LABEL_SUBHEADER_INDEX,
b'\xFE\xFB\xFF\xFF\xFF\xFF\xFF\xFF': FORMAT_AND_LABEL_SUBHEADER_INDEX,
b'\xFF\xFF\xFF\xFF\xFF\xFF\xFB\xFE': FORMAT_AND_LABEL_SUBHEADER_INDEX,
b'\xFE\xFF\xFF\xFF': COLUMN_LIST_SUBHEADER_INDEX,
b'\xFF\xFF\xFF\xFE': COLUMN_LIST_SUBHEADER_INDEX,
b'\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF': COLUMN_LIST_SUBHEADER_INDEX,
b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE': COLUMN_LIST_SUBHEADER_INDEX,
}
SUBHEADER_INDEX_TO_CLASS = {
ROW_SIZE_SUBHEADER_INDEX: RowSizeSubheader,
COLUMN_SIZE_SUBHEADER_INDEX: ColumnSizeSubheader,
SUBHEADER_COUNTS_SUBHEADER_INDEX: SubheaderCountsSubheader,
COLUMN_TEXT_SUBHEADER_INDEX: ColumnTextSubheader,
COLUMN_NAME_SUBHEADER_INDEX: ColumnNameSubheader,
COLUMN_ATTRIBUTES_SUBHEADER_INDEX: ColumnAttributesSubheader,
FORMAT_AND_LABEL_SUBHEADER_INDEX: FormatAndLabelSubheader,
COLUMN_LIST_SUBHEADER_INDEX: ColumnListSubheader,
DATA_SUBHEADER_INDEX: DataSubheader,
}
ALIGN_1_CHECKER_VALUE = b'3'
ALIGN_1_OFFSET = 32
ALIGN_1_LENGTH = 1
ALIGN_1_VALUE = 4
U64_BYTE_CHECKER_VALUE = b'3'
ALIGN_2_OFFSET = 35
ALIGN_2_LENGTH = 1
ALIGN_2_VALUE = 4
ENDIANNESS_OFFSET = 37
ENDIANNESS_LENGTH = 1
PLATFORM_OFFSET = 39
PLATFORM_LENGTH = 1
DATASET_OFFSET = 92
DATASET_LENGTH = 64
FILE_TYPE_OFFSET = 156
FILE_TYPE_LENGTH = 8
DATE_CREATED_OFFSET = 164
DATE_CREATED_LENGTH = 8
DATE_MODIFIED_OFFSET = 172
DATE_MODIFIED_LENGTH = 8
HEADER_SIZE_OFFSET = 196
HEADER_SIZE_LENGTH = 4
PAGE_SIZE_OFFSET = 200
PAGE_SIZE_LENGTH = 4
PAGE_COUNT_OFFSET = 204
PAGE_COUNT_LENGTH = 4
SAS_RELEASE_OFFSET = 216
SAS_RELEASE_LENGTH = 8
SAS_SERVER_TYPE_OFFSET = 224
SAS_SERVER_TYPE_LENGTH = 16
OS_VERSION_NUMBER_OFFSET = 240
OS_VERSION_NUMBER_LENGTH = 16
OS_MAKER_OFFSET = 256
OS_MAKER_LENGTH = 16
OS_NAME_OFFSET = 272
OS_NAME_LENGTH = 16
PAGE_BIT_OFFSET_X86 = 16
PAGE_BIT_OFFSET_X64 = 32
SUBHEADER_POINTER_LENGTH_X86 = 12
SUBHEADER_POINTER_LENGTH_X64 = 24
PAGE_TYPE_OFFSET = 0
PAGE_TYPE_LENGTH = 2
BLOCK_COUNT_OFFSET = 2
BLOCK_COUNT_LENGTH = 2
SUBHEADER_COUNT_OFFSET = 4
SUBHEADER_COUNT_LENGTH = 2
PAGE_META_TYPE = 0
PAGE_DATA_TYPE = 256
PAGE_MIX_TYPE = [512, 640]
PAGE_AMD_TYPE = 1024
PAGE_METC_TYPE = 16384
PAGE_COMP_TYPE = -28672
PAGE_MIX_DATA_TYPE = PAGE_MIX_TYPE + [PAGE_DATA_TYPE]
PAGE_META_MIX_AMD = [PAGE_META_TYPE] + PAGE_MIX_TYPE + [PAGE_AMD_TYPE]
PAGE_ANY = PAGE_META_MIX_AMD +\
[PAGE_DATA_TYPE, PAGE_METC_TYPE, PAGE_COMP_TYPE]
SUBHEADER_POINTERS_OFFSET = 8
TRUNCATED_SUBHEADER_ID = 1
COMPRESSED_SUBHEADER_ID = 4
COMPRESSED_SUBHEADER_TYPE = 1
def __init__(self, parent):
self.parent = parent
self.properties = SASProperties()
self.properties.filename = os.path.basename(parent.path)
# Check magic number
h = parent.cached_page = parent._file.read(288)
if len(h) < 288:
parent.logger.error('header too short (not a sas7bdat file?)')
return
if not self.check_magic_number(h):
parent.logger.error('magic number mismatch')
return
align1 = 0
align2 = 0
offsets_and_lengths = {
self.ALIGN_1_OFFSET: self.ALIGN_1_LENGTH,
self.ALIGN_2_OFFSET: self.ALIGN_2_LENGTH,
}
align_vals = parent._read_bytes(offsets_and_lengths)
if align_vals[self.ALIGN_1_OFFSET] == self.U64_BYTE_CHECKER_VALUE:
align2 = self.ALIGN_2_VALUE
self.properties.u64 = True
if align_vals[self.ALIGN_2_OFFSET] == self.ALIGN_1_CHECKER_VALUE:
align1 = self.ALIGN_1_VALUE
total_align = align1 + align2
offsets_and_lengths = {
self.ENDIANNESS_OFFSET: self.ENDIANNESS_LENGTH,
self.PLATFORM_OFFSET: self.PLATFORM_LENGTH,
self.DATASET_OFFSET: self.DATASET_LENGTH,
self.FILE_TYPE_OFFSET: self.FILE_TYPE_LENGTH,
self.DATE_CREATED_OFFSET + align1: self.DATE_CREATED_LENGTH,
self.DATE_MODIFIED_OFFSET + align1: self.DATE_MODIFIED_LENGTH,
self.HEADER_SIZE_OFFSET + align1: self.HEADER_SIZE_LENGTH,
self.PAGE_SIZE_OFFSET + align1: self.PAGE_SIZE_LENGTH,
self.PAGE_COUNT_OFFSET + align1: self.PAGE_COUNT_LENGTH + align2,
self.SAS_RELEASE_OFFSET + total_align: self.SAS_RELEASE_LENGTH,
self.SAS_SERVER_TYPE_OFFSET + total_align:
self.SAS_SERVER_TYPE_LENGTH,
self.OS_VERSION_NUMBER_OFFSET + total_align:
self.OS_VERSION_NUMBER_LENGTH,
self.OS_MAKER_OFFSET + total_align: self.OS_MAKER_LENGTH,
self.OS_NAME_OFFSET + total_align: self.OS_NAME_LENGTH,
}
vals = parent._read_bytes(offsets_and_lengths)
self.properties.endianess = 'little'\
if vals[self.ENDIANNESS_OFFSET] == b'\x01' else 'big'
parent.endianess = self.properties.endianess
if vals[self.PLATFORM_OFFSET] == b'1':
self.properties.platform = 'unix'
elif vals[self.PLATFORM_OFFSET] == b'2':
self.properties.platform = 'windows'
else:
self.properties.platform = 'unknown'
self.properties.name = parent._read_val(
's', vals[self.DATASET_OFFSET], self.DATASET_LENGTH
)
self.properties.file_type = parent._read_val(
's', vals[self.FILE_TYPE_OFFSET], self.FILE_TYPE_LENGTH
)
# Timestamp is epoch 01/01/1960
try:
self.properties.date_created = datetime(1960, 1, 1) + timedelta(
seconds=parent._read_val(
'd', vals[self.DATE_CREATED_OFFSET + align1],
self.DATE_CREATED_LENGTH
)
)
except:
pass
try:
self.properties.date_modified = datetime(1960, 1, 1) + timedelta(
seconds=parent._read_val(
'd', vals[self.DATE_MODIFIED_OFFSET + align1],
self.DATE_MODIFIED_LENGTH
)
)
except:
pass
self.properties.header_length = parent._read_val(
'i', vals[self.HEADER_SIZE_OFFSET + align1],
self.HEADER_SIZE_LENGTH
)
if self.properties.u64 and self.properties.header_length != 8192:
parent.logger.warning('header length %s != 8192',
self.properties.header_length)
parent.cached_page += parent._file.read(
self.properties.header_length - 288
)
h = parent.cached_page
if len(h) != self.properties.header_length:
parent.logger.error('header too short (not a sas7bdat file?)')
return
self.properties.page_length = parent._read_val(
'i', vals[self.PAGE_SIZE_OFFSET + align1],
self.PAGE_SIZE_LENGTH
)
self.properties.page_count = parent._read_val(
'i', vals[self.PAGE_COUNT_OFFSET + align1],
self.PAGE_COUNT_LENGTH
)
self.properties.sas_release = parent._read_val(
's', vals[self.SAS_RELEASE_OFFSET + total_align],
self.SAS_RELEASE_LENGTH
)
self.properties.server_type = parent._read_val(
's', vals[self.SAS_SERVER_TYPE_OFFSET + total_align],
self.SAS_SERVER_TYPE_LENGTH
)
self.properties.os_type = parent._read_val(
's', vals[self.OS_VERSION_NUMBER_OFFSET + total_align],
self.OS_VERSION_NUMBER_LENGTH
)
if vals[self.OS_NAME_OFFSET + total_align] != 0:
self.properties.os_name = parent._read_val(
's', vals[self.OS_NAME_OFFSET + total_align],
self.OS_NAME_LENGTH
)
else:
self.properties.os_name = parent._read_val(
's', vals[self.OS_MAKER_OFFSET + total_align],
self.OS_MAKER_LENGTH
)
parent.u64 = self.properties.u64
def __repr__(self):
cols = [['Num', 'Name', 'Type', 'Length', 'Format', 'Label']]
align = ['>', '<', '<', '>', '<', '<']
col_width = [len(x) for x in cols[0]]
for i, col in enumerate(self.parent.columns, 1):
tmp = [i, col.name, col.type, col.length,
col.format, col.label]
cols.append(tmp)
for j, val in enumerate(tmp):
col_width[j] = max(col_width[j], len(str(val)))
rows = [' '.join('{0:{1}}'.format(x, col_width[i])
for i, x in enumerate(cols[0])),
' '.join('-' * col_width[i]
for i in xrange(len(align)))]
for row in cols[1:]:
rows.append(' '.join(
'{0:{1}{2}}'.format(x.decode(self.parent.encoding,
self.parent.encoding_errors)
if isinstance(x, bytes) else x,
align[i], col_width[i])
for i, x in enumerate(row))
)
cols = '\n'.join(rows)
hdr = 'Header:\n%s' % '\n'.join(
['\t%s: %s' % (k, v.decode(self.parent.encoding,
self.parent.encoding_errors)
if isinstance(v, bytes) else v)
for k, v in sorted(six.iteritems(self.properties.__dict__))]
)
return '%s\n\nContents of dataset "%s":\n%s\n' % (
hdr,
self.properties.name.decode(self.parent.encoding,
self.parent.encoding_errors),
cols
)
def _page_bit_offset(self):
return self.PAGE_BIT_OFFSET_X64 if self.properties.u64 else\
self.PAGE_BIT_OFFSET_X86
PAGE_BIT_OFFSET = property(_page_bit_offset)
def _subheader_pointer_length(self):
return self.SUBHEADER_POINTER_LENGTH_X64 if self.properties.u64 else\
self.SUBHEADER_POINTER_LENGTH_X86
SUBHEADER_POINTER_LENGTH = property(_subheader_pointer_length)
def check_magic_number(self, header):
return header[:len(self.MAGIC)] == self.MAGIC
def parse_metadata(self):
done = False
while not done:
self.parent.cached_page = self.parent._file.read(
self.properties.page_length
)
if len(self.parent.cached_page) <= 0:
break
if len(self.parent.cached_page) != self.properties.page_length:
self.parent.logger.error(
'Failed to read a meta data page from file'
)
done = self.process_page_meta()
def read_page_header(self):
bit_offset = self.PAGE_BIT_OFFSET
vals = self.parent._read_bytes({
self.PAGE_TYPE_OFFSET + bit_offset: self.PAGE_TYPE_LENGTH,
self.BLOCK_COUNT_OFFSET + bit_offset: self.BLOCK_COUNT_LENGTH,
self.SUBHEADER_COUNT_OFFSET + bit_offset:
self.SUBHEADER_COUNT_LENGTH
})
self.parent.current_page_type = self.parent._read_val(
'h', vals[self.PAGE_TYPE_OFFSET + bit_offset],
self.PAGE_TYPE_LENGTH
)
self.parent.current_page_block_count = self.parent._read_val(
'h', vals[self.BLOCK_COUNT_OFFSET + bit_offset],
self.BLOCK_COUNT_LENGTH
)
self.parent.current_page_subheaders_count = self.parent._read_val(
'h', vals[self.SUBHEADER_COUNT_OFFSET + bit_offset],
self.SUBHEADER_COUNT_LENGTH
)
def process_page_meta(self):
self.read_page_header()
if self.parent.current_page_type in self.PAGE_META_MIX_AMD:
self.process_page_metadata()
return self.parent.current_page_type in self.PAGE_MIX_DATA_TYPE or \
self.parent.current_page_data_subheader_pointers
def process_page_metadata(self):
parent = self.parent
bit_offset = self.PAGE_BIT_OFFSET
for i in xrange(parent.current_page_subheaders_count):
pointer = self.process_subheader_pointers(
self.SUBHEADER_POINTERS_OFFSET + bit_offset, i
)
if not pointer.length:
continue
if pointer.compression != self.TRUNCATED_SUBHEADER_ID:
subheader_signature = self.read_subheader_signature(
pointer.offset
)
subheader_index = self.get_subheader_class(
subheader_signature,
pointer.compression,
pointer.type
)
if subheader_index is not None:
if subheader_index != self.DATA_SUBHEADER_INDEX:
cls = self.SUBHEADER_INDEX_TO_CLASS.get(
subheader_index
)
if cls is None:
raise NotImplementedError
cls(parent).process_subheader(
pointer.offset,
pointer.length
)
else:
parent.current_page_data_subheader_pointers.append(
pointer
)
else:
parent.logger.debug('unknown subheader signature')
def read_subheader_signature(self, offset):
length = 8 if self.properties.u64 else 4
return self.parent._read_bytes({offset: length})[offset]
def get_subheader_class(self, signature, compression, type):
index = self.SUBHEADER_SIGNATURE_TO_INDEX.get(signature)
if self.properties.compression is not None and index is None and\
(compression == self.COMPRESSED_SUBHEADER_ID or
compression == 0) and type == self.COMPRESSED_SUBHEADER_TYPE:
index = self.DATA_SUBHEADER_INDEX
return index
def process_subheader_pointers(self, offset, subheader_pointer_index):
length = 8 if self.properties.u64 else 4
subheader_pointer_length = self.SUBHEADER_POINTER_LENGTH
total_offset = (
offset + subheader_pointer_length * subheader_pointer_index
)
vals = self.parent._read_bytes({
total_offset: length,
total_offset + length: length,
total_offset + 2 * length: 1,
total_offset + 2 * length + 1: 1,
})
subheader_offset = self.parent._read_val(
'i', vals[total_offset], length
)
subheader_length = self.parent._read_val(
'i', vals[total_offset + length], length
)
subheader_compression = self.parent._read_val(
'b', vals[total_offset + 2 * length], 1
)
subheader_type = self.parent._read_val(
'b', vals[total_offset + 2 * length + 1], 1
)
return SubheaderPointer(subheader_offset, subheader_length,
subheader_compression, subheader_type)
@atexit.register
def _close_files():
for f in SAS7BDAT._open_files:
f.close()
if __name__ == '__main__':
pass # TODO: write some unit tests
|
mit
|
spelteam/spel
|
src/python/plotLabelErrors_global.py
|
1
|
11536
|
#! /usr/bin/env python2.7
import glob
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import argparse
import numpy as np
import matplotlib.image as mpimg
from matplotlib.lines import Line2D
from pylab import figure, show
import math
import os
import re
import random
def usage():
print("Author: Mykyta Fastovets / poselib project / 2015")
print("This utility is an analysis tool for plotting error files generated by the poselib tuners.")
print("Input should be a .err file.")
print("Example usage: ./plotSimVsTemp.py ~/file.err ")
def dist(a,b):
return math.sqrt((a[0]-b[0])**2+(a[1] - b[1])**2)
parser = argparse.ArgumentParser(description='1 non-optional argument')
parser.add_argument('ERRIN', action="store")
parseResult = parser.parse_args()
#ERRIN is the input config file; it lists the .err files that contain the data to plot
configFile = parseResult.ERRIN
errFiles = [line.strip() for line in open(configFile)] #read the list of .err files from the config file
print errFiles
fig = plt.figure(1, figsize=(20.2, 10.8), dpi=600)
# ax = fig.add_subplot(211)#, projection='2d')
# ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
# alpha=0.5)
# ax.xaxis.grid(True, linestyle='-', which='major', color='lightgrey',
# alpha=0.5)
bx = fig.add_subplot(111)#, projection='2d')
bx.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
bx.xaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
pcol = [(0.94,0.64,1.0),(0.0,0.46,0.86),(0.6,0.25,0.0),(0.3,0,0.36),(0.1,0.1,0.1),(0,0.36,0.19),(0.17,0.8,0.28),(1,0.8,0.6),
(0.5,0.5,0.5),(0.58,1,0.71),(0.56,0.47,0), (0.6, 0.8, 1.0), (0.25, 0.6, 0.0), (0.86, 0.0, 0.46), (1.0, 1.0, 0.64), (0, 0.5, 1.0), (1.0, 0.5, 0.0)]
colCount=0
for errFile in errFiles:
print 'Reading '+errFile
myFile = open(errFile)
data = [line.strip().split() for line in open(errFile)] #read the data from the int file
frameIndex=-1
partIndex=-1
paramValue=-1
itemIndex=0
readMode = 'false'
result=[]
paramData=[]
frameData=[]
partData=[]
for dataItem in data:
if dataItem[0]=='{':
readMode='true'
paramValue=data[itemIndex-1][0]
elif dataItem[0]=='}':
result.append([paramValue, paramData])
paramData=[]
elif dataItem[0]=='[':
frameIndex=data[itemIndex-1][0]
elif dataItem[0]==']':
paramData.append([frameIndex, frameData])
frameData=[]
elif dataItem[0]=='(':
partIndex=data[itemIndex-1][0]
elif dataItem[0]==')':
frameData.append([partIndex, partData])
partData=[]
elif len(dataItem)>1 and readMode=='true':
partData.append(dataItem)
itemIndex+=1
#Create plottable objects in x,y format and define the graphs that will be produced
numParams = len(result)
#Detector quality graphs measure the correspondence of score to RMS error; there will be one line per param
numTopLables=100 #define number of labels from the top scorers to take
percentTopLabels=10 #define the percentage of labels to analyse more deeply from the top
topErrorsP = [] #percent errors
topErrorsA = [] #absolute errors
#Param evaluation graphs, to measure how each parameter setting performs
percentileParamErrors=[]
#print result
nParts = 0
tme=[]
tmi=[]
for i in range(numParams):
tme.append([])
tmi.append([])
topErr=[]
topCnt=[]
topErrAbs=[]
topCntAbs=[]
for r in range(numTopLables):
topErr.append(0)
topCnt.append(0)
topErrAbs.append(0)
topCntAbs.append(0)
#pcol = "#%06x" % random.randint(0,0xFFFFFF)
numFrames = len(result[i][1])
x = []
y = []
z = []
z2 = []
r10=[]
rTest=0
rCount=0
R=[]
for r in range(11):
R.append(0)
partErrors=[] #errors
for j in range(numFrames):
partFrameErrors=[]
numParts = len(result[i][1][j][1])
nParts=numParts
avgMinIndex=0.0
rmsError=0.0
RMS=[]
for r in range(11):
RMS.append(0)
#print i
#print j
#print numParts
for k in range(numParts):
partID = result[i][1][j][1][k][0]
numLabels=len(result[i][1][j][1][k][1]) #number of labels in this part
if partID > 5 or partID == 0: #discard limbs 1 through 5
tenth=float(numLabels)*0.1
percent = float(numLabels)*0.01
col = "#%06x" % random.randint(0,0xFFFFFF)
minError=1000000000
minIndex=-1.0
topMinErr=1000000000
topMinIndex=-1.0
topError=float(result[i][1][j][1][k][1][0][2])
rms=[]
cnts=[]
for r in range(11):
rms.append(0)
cnts.append(0)
rms[0] = topError #set the top error
cnts[0]=1
for l in range(numLabels):
errVal = float(result[i][1][j][1][k][1][l][2])
if l<numTopLables:
topErrAbs[l]+=errVal
topCntAbs[l]+=1
index=int(l/percent)
topErr[index]+=errVal
topCnt[index]+=1
if l<tenth:
rms[1]+=errVal #add error
cnts[1]+=1
rTest+=errVal
rCount+=1
if errVal < topMinErr:
topMinErr = errVal
topMinIndex = l
if l<tenth*2:
rms[2]+=errVal #add error
cnts[2]+=1
if l<tenth*3:
rms[3]+=errVal #add error
cnts[3]+=1
if l<tenth*4:
rms[4]+=errVal #add error
cnts[4]+=1
if l<tenth*5:
rms[5]+=errVal #add error
cnts[5]+=1
if l<tenth*6:
rms[6]+=errVal #add error
cnts[6]+=1
if l<tenth*7:
rms[7]+=errVal #add error
cnts[7]+=1
if l<tenth*8:
rms[8]+=errVal #add error
cnts[8]+=1
if l<tenth*9:
rms[9]+=errVal #add error
cnts[9]+=1
if l<tenth*10:
rms[10]+=errVal #add error
cnts[10]+=1
if errVal < minError:
minError = errVal
minIndex = int(result[i][1][j][1][k][1][l][0])
for ev in range(11):
if ev>0 and tenth>0:
RMS[ev]+=float(rms[ev])/float(cnts[ev])
rmsError+=topError
avgMinIndex+=minIndex
tme[i].append(topMinErr)
tmi[i].append(topMinIndex)
partFrameErrors.append(partErrors)
for ev in range(11):
if ev>0 and numParts>0:
R[ev] += float(RMS[ev])/float(numParts)
avgMinIndex=float(avgMinIndex)/float(numParts)
rmsError=float(rmsError)/float(numParts)
x.append(float(result[i][0])) #
y.append(int(result[i][1][j][0])) #frame number
z.append(avgMinIndex) #average index
z2.append(rmsError) #rms error for all top labels
for r in range(numTopLables):
if topCnt[r]!=0:
topErr[r]=topErr[r]/topCnt[r]
if topCntAbs[r]!=0:
topErrAbs[r] = topErrAbs[r]/topCntAbs[r]
for ev in range(11):
if ev>0 and numParts>0:
R[ev] = float(R[ev])/float(numFrames)
#partParamErrors.append(partErrors)
topErrorsP.append(topErr)
topErrorsA.append(topErrAbs)
percentileParamErrors.append(R)
#Now do the parts analysis for top 10% of labels
partParamErrors=[] #not done yet
for i in range(numParams):
numFrames = len(result[i][1])
partErrors=[]
for r in range(nParts):
partErrors.append(0)
for j in range(numFrames):
numParts = len(result[i][1][j][1])
for k in range(numParts):
partErr=0
partCnt=0
partID = int(result[i][1][j][1][k][0])
numLabels=len(result[i][1][j][1][k][1]) #number of labels in this part
tenth=float(numLabels)*0.1
for l in range(numLabels):
errVal = float(result[i][1][j][1][k][1][l][2])
if l<tenth:
partErr+=errVal #add error
partCnt+=1
#print float(partErr)/float(partCnt)
partErrors[partID]+=float(partErr)/float(partCnt) #store averages across labels
for p in range(len(partErrors)):
partErrors[p] = float(partErrors[p])/float(numFrames) #divide by numFrames
#print partErrors[p]
#raw_input('not here')
partParamErrors.append(partErrors)
#print partParamErrors
#raw_input('here')
#Do plotting
# fig2 = plt.figure(2, figsize=(44.2, 10.8), dpi=600)
# cx = fig2.add_subplot(111)#, projection='3d')
# cx.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
# alpha=0.5)
# cx.xaxis.grid(True, linestyle='-', which='major', color='lightgrey',
# alpha=0.5)
# dx = fig2.add_subplot(212)#, projection='3d')
# dx.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
# alpha=0.5)
# dx.xaxis.grid(True, linestyle='-', which='major', color='lightgrey',
# alpha=0.5)
# pcol = [(0.94,0.64,1.0),(0.0,0.46,0.86),(0.6,0.25,0.0),(0.3,0,0.36),(0.1,0.1,0.1),(0,0.36,0.19),(0.17,0.8,0.28),(1,0.8,0.6),
# (0.5,0.5,0.5),(0.58,1,0.71),(0.56,0.47,0), (0.6, 0.8, 1.0), (0.25, 0.6, 0.0), (0.86, 0.0, 0.46), (1.0, 1.0, 0.64), (0, 0.5, 1.0), (1.0, 0.5, 0.0)]
#colors = itertools.cycle(['red', 'blue', 'green', 'magenta', 'cyan', 'black'])
paramName = data[4][1]
paramVals=[]
pp=[]
pc=[]
for p in range(nParts):
pc.append([])
for p in range(nParts):
for i in range(numParams):
pc[p].append(partParamErrors[i][p]) #this is the part error at this param value
#print partParamErrors[i][p]
#print percentileParamErrors
plotLabel = errFile.split('_')[-1].split('.')[0]
for i in range(numParams):
paramVal = result[i][0]
paramVals.append(paramVal)
pp.append(percentileParamErrors[i][1])
if colCount==0:
bx.boxplot(tmi)#, label=str(ev))
#cx.plot(range(numTopLables), topErrorsP[i], color=pcol[i], alpha=1.0, label=str(result[i][0]), linewidth=4.0) #draw min ranks
#dx.plot(range(numTopLables), topErrorsA[i], color=pcol[i], alpha=1.0, label=str(result[i][0]), linewidth=4.0) #draw min ranks
# for p in range(nParts):
# if p==0 or p>5: #=0 or p>5: # don't plot the useless parts
# bx.plot(paramVals, pc[p], color=pcol[p], alpha=1.0, label='Part '+str(p), linewidth=4.0)#, label=str(ev)) #a point at each parameters setting, where ev is the percentile 0=1, 1=10,..., 10=100
#bx.plot(paramVals, pp, color=pcol[colCount], alpha=1.0, label=plotLabel, linewidth=4.0)#, label=str(ev)) #a point at each parameters setting, where ev is the percentile 0=1, 1=10,..., 10=100
#ax.set_xlabel(paramName+' value', fontsize=18)
#ax.set_ylabel('RMS Error (pixels)', fontsize=18)
colCount+=1
plt.rc('legend',**{'fontsize':20})
plt.tick_params(axis='both', which='major', labelsize=20)
plt.tick_params(axis='both', which='minor', labelsize=20)
bx.set_xlabel(paramName+' value', fontsize=25)
bx.set_ylabel('RMS Error (pixels)', fontsize=25)
# cx.set_xlabel('Label Rank (%)', fontsize=25)
# cx.set_ylabel('RMS Error (pixels)', fontsize=25)
# dx.set_xlabel('Label Rank', fontsize=18)
# dx.set_ylabel('RMS Error (pixels)', fontsize=18)
handles1, labels1 = bx.get_legend_handles_labels()
bx.legend(handles1, labels1)
bx.grid()
bx.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),
ncol=3, fancybox=True, shadow=True)
bx.get_legend().set_title(title="Part")
# handles, labels = cx.get_legend_handles_labels()
# cx.legend(handles, labels)
# cx.grid()
# cx.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),
# ncol=3, fancybox=True, shadow=True)
# cx.get_legend().set_title(title=str(paramName)+" value")
#plt.setp(cx.get_legend().get_title(),fontsize=24)
plt.setp(bx.get_legend().get_title(),fontsize=24)
plotSave = configFile.split('.')[0]+'_s.png'
#plot2Save = errFile.split('.')[0]+'_q.png'
#fig.suptitle("Top 10% labels vs Error ", fontsize=35)
#fig2.suptitle("Error vs Label Rank", fontsize=30)
fig.savefig(plotSave, bbox_inches='tight')
#fig2.savefig(plot2Save, bbox_inches='tight')
print 'Saved to '+ plotSave
|
gpl-3.0
|
murali-munna/scikit-learn
|
examples/decomposition/plot_kernel_pca.py
|
353
|
2011
|
"""
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes the data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
|
bsd-3-clause
|
joshloyal/scikit-learn
|
sklearn/tests/test_multiclass.py
|
26
|
26681
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal, assert_raises_regex
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils import shuffle
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron, LogisticRegression,
SGDClassifier)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
from sklearn.externals.six.moves import zip
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
# Fail on multioutput data
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1, 2], [3, 1]]))
assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit,
np.array([[1, 0], [0, 1]]),
np.array([[1.5, 2.4], [3.1, 0.8]]))
def test_check_classification_targets():
# Test that check_classification_target return correct type. #5782
y = np.array([0.0, 1.1, 2.0, 3.0])
msg = type_of_target(y)
assert_raise_message(ValueError, msg, check_classification_targets, y)
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_partial_fit():
# Test if partial_fit is working as intended
X, y = shuffle(iris.data, iris.target, random_state=0)
ovr = OneVsRestClassifier(MultinomialNB())
ovr.partial_fit(X[:100], y[:100], np.unique(y))
ovr.partial_fit(X[100:], y[100:])
pred = ovr.predict(X)
ovr2 = OneVsRestClassifier(MultinomialNB())
pred2 = ovr2.fit(X, y).predict(X)
assert_almost_equal(pred, pred2)
assert_equal(len(ovr.estimators_), len(np.unique(y)))
assert_greater(np.mean(y == pred), 0.65)
# Test when mini-batches don't have all classes
# with SGDClassifier
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr = OneVsRestClassifier(SGDClassifier(n_iter=1, shuffle=False,
random_state=0))
ovr.partial_fit(X[:7], y[:7], np.unique(y))
ovr.partial_fit(X[7:], y[7:])
pred = ovr.predict(X)
ovr1 = OneVsRestClassifier(SGDClassifier(n_iter=1, shuffle=False,
random_state=0))
pred1 = ovr1.fit(X, y).predict(X)
assert_equal(np.mean(pred == y), np.mean(pred1 == y))
def test_ovr_partial_fit_exceptions():
ovr = OneVsRestClassifier(MultinomialNB())
X = np.abs(np.random.randn(14, 2))
y = [1, 1, 1, 1, 2, 3, 3, 0, 0, 2, 3, 1, 2, 3]
ovr.partial_fit(X[:7], y[:7], np.unique(y))
# A new class value which was not in the first call of partial_fit
# It should raise ValueError
y1 = [5] + y[7:-1]
assert_raises_regex(ValueError, "Mini-batch contains \[.+\] while classes"
" must be subset of \[.+\]",
ovr.partial_fit, X=X[7:], y=y1)
def test_ovr_ovo_regressor():
# test that ovr and ovo work on regressors which don't have a decision_
# function
ovr = OneVsRestClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
ovr = OneVsOneClassifier(DecisionTreeRegressor())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes * (n_classes - 1) / 2)
assert_array_equal(np.unique(pred), [0, 1, 2])
# we are doing something sensible
assert_greater(np.mean(pred == iris.target), .9)
def test_ovr_fit_predict_sparse():
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix,
sp.lil_matrix]:
base_clf = MultinomialNB(alpha=1)
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train))
Y_pred_sprs = clf_sprs.predict(X_test)
assert_true(clf.multilabel_)
assert_true(sp.issparse(Y_pred_sprs))
assert_array_equal(Y_pred_sprs.toarray(), Y_pred)
# Test predict_proba
Y_proba = clf_sprs.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred_sprs.toarray())
# Test decision_function
clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train))
dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int)
assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray())
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent.
# Note: this tests the case where _ConstantPredictor is utilised
X = np.ones((10, 2))
X[:5, :] = 0
# Build an indicator matrix where two features are always on.
# As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)]
y = np.zeros((10, 3))
y[5:, 0] = 1
y[:, 1] = 1
y[:, 2] = 1
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
y_pred = ovr.decision_function(X)
assert_equal(np.unique(y_pred[:, -2:]), 1)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.ones(X.shape[0]))
# y has a constantly absent label
y = np.zeros((10, 2))
y[5:, 0] = 1 # variable label
ovr = OneVsRestClassifier(LogisticRegression())
assert_warns(UserWarning, ovr.fit, X, y)
y_pred = ovr.predict_proba(X)
assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0]))
def test_ovr_multiclass():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "ham", "eggs", "ham"]
Y = np.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet()):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 0, 4]])[0]
assert_array_equal(y_pred, [0, 0, 1])
def test_ovr_binary():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]])
y = ["eggs", "spam", "spam", "eggs", "spam"]
Y = np.array([[0, 1, 1, 0, 1]]).T
classes = set("eggs spam".split())
def conduct_test(base_clf, test_predict_proba=False):
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict(np.array([[0, 0, 4]]))[0]
assert_equal(set(y_pred), set("eggs"))
if test_predict_proba:
X_test = np.array([[0, 0, 4]])
probabilities = clf.predict_proba(X_test)
assert_equal(2, len(probabilities[0]))
assert_equal(clf.classes_[np.argmax(probabilities, axis=1)],
clf.predict(X_test))
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[3, 0, 0]])[0]
assert_equal(y_pred, 1)
for base_clf in (LinearSVC(random_state=0), LinearRegression(),
Ridge(), ElasticNet()):
conduct_test(base_clf)
for base_clf in (MultinomialNB(), SVC(probability=True),
LogisticRegression()):
conduct_test(base_clf, test_predict_proba=True)
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
clf = OneVsRestClassifier(base_clf).fit(X, y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"),
prec,
decimal=2)
assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"),
recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_false(hasattr(decision_only, 'predict_proba'))
# Estimator with predict_proba disabled, depending on parameters.
decision_only = OneVsRestClassifier(svm.SVC(probability=False))
assert_false(hasattr(decision_only, 'predict_proba'))
decision_only.fit(X_train, Y_train)
assert_false(hasattr(decision_only, 'predict_proba'))
assert_true(hasattr(decision_only, 'decision_function'))
# Estimator which can get predict_proba enabled after fitting
gs = GridSearchCV(svm.SVC(probability=False),
param_grid={'probability': [True]})
proba_after_fit = OneVsRestClassifier(gs)
assert_false(hasattr(proba_after_fit, 'predict_proba'))
proba_after_fit.fit(X_train, Y_train)
assert_true(hasattr(proba_after_fit, 'predict_proba'))
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = Y_proba > .5
assert_array_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# Decision function only estimator.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_false(hasattr(decision_only, 'predict_proba'))
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_multilabel_decision_function():
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal((clf.decision_function(X_test) > 0).astype(int),
clf.predict(X_test))
def test_ovr_single_label_decision_function():
X, Y = datasets.make_classification(n_samples=100,
n_features=20,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test = X[80:]
clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train)
assert_array_equal(clf.decision_function(X_test).ravel() > 0,
clf.predict(X_test))
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
for base_classifier in [SVC(kernel='linear', random_state=0), LinearSVC(random_state=0)]:
# SVC has sparse coef with sparse input data
ovr = OneVsRestClassifier(base_classifier)
for X in [iris.data, sp.csr_matrix(iris.data)]:
# test with dense and sparse coef
ovr.fit(X, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
# don't densify sparse coefficients
assert_equal(sp.issparse(ovr.estimators_[0].coef_), sp.issparse(ovr.coef_))
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_on_list():
# Test that OneVsOne fitting works with a list of targets and yields the
# same output as predict from an array
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
prediction_from_array = ovo.fit(iris.data, iris.target).predict(iris.data)
iris_data_list = [list(a) for a in iris.data]
prediction_from_list = ovo.fit(iris_data_list,
list(iris.target)).predict(iris_data_list)
assert_array_equal(prediction_from_array, prediction_from_list)
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_partial_fit_predict():
X, y = shuffle(iris.data, iris.target)
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(X[:100], y[:100], np.unique(y))
ovo1.partial_fit(X[100:], y[100:])
pred1 = ovo1.predict(X)
ovo2 = OneVsOneClassifier(MultinomialNB())
ovo2.fit(X, y)
pred2 = ovo2.predict(X)
assert_equal(len(ovo1.estimators_), n_classes * (n_classes - 1) / 2)
assert_greater(np.mean(y == pred1), 0.65)
assert_almost_equal(pred1, pred2)
# Test when mini-batches don't have all target classes
ovo1 = OneVsOneClassifier(MultinomialNB())
ovo1.partial_fit(iris.data[:60], iris.target[:60], np.unique(iris.target))
ovo1.partial_fit(iris.data[60:], iris.target[60:])
pred1 = ovo1.predict(iris.data)
ovo2 = OneVsOneClassifier(MultinomialNB())
pred2 = ovo2.fit(iris.data, iris.target).predict(iris.data)
assert_almost_equal(pred1, pred2)
assert_equal(len(ovo1.estimators_), len(np.unique(iris.target)))
assert_greater(np.mean(iris.target == pred1), 0.65)
def test_ovo_decision_function():
n_samples = iris.data.shape[0]
ovo_clf = OneVsOneClassifier(LinearSVC(random_state=0))
ovo_clf.fit(iris.data, iris.target)
decisions = ovo_clf.decision_function(iris.data)
assert_equal(decisions.shape, (n_samples, n_classes))
assert_array_equal(decisions.argmax(axis=1), ovo_clf.predict(iris.data))
# Compute the votes
votes = np.zeros((n_samples, n_classes))
k = 0
for i in range(n_classes):
for j in range(i + 1, n_classes):
pred = ovo_clf.estimators_[k].predict(iris.data)
votes[pred == 0, i] += 1
votes[pred == 1, j] += 1
k += 1
# Extract votes and verify
assert_array_equal(votes, np.round(decisions))
for class_idx in range(n_classes):
        # For each sample and each class, there are only 3 possible vote levels
        # because there are only 3 distinct class pairs and thus 3 distinct
        # binary classifiers.
# Therefore, sorting predictions based on votes would yield
# mostly tied predictions:
assert_true(set(votes[:, class_idx]).issubset(set([0., 1., 2.])))
# The OVO decision function on the other hand is able to resolve
# most of the ties on this data as it combines both the vote counts
# and the aggregated confidence levels of the binary classifiers
# to compute the aggregate decision function. The iris dataset
# has 150 samples with a couple of duplicates. The OvO decisions
# can resolve most of the ties:
assert_greater(len(np.unique(decisions[:, class_idx])), 146)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# Test that ties are broken using the decision function,
# not defaulting to the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
ovo_decision = multi_clf.decision_function(X)
# Classifiers are in order 0-1, 0-2, 1-2
# Use decision_function to compute the votes and the normalized
# sum_of_confidences, which is used to disambiguate when there is a tie in
# votes.
votes = np.round(ovo_decision)
normalized_confidences = ovo_decision - votes
# For the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# For the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# For the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], normalized_confidences[0].argmax())
def test_ovo_ties2():
    # Test that ties can be won by labels other than just the first two
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y_ref = np.array([2, 0, 1, 2])
# cycle through labels so that each label wins once
for i in range(3):
y = (y_ref + i) % 3
multi_clf = OneVsOneClassifier(Perceptron(shuffle=False))
ovo_prediction = multi_clf.fit(X, y).predict(X)
assert_equal(ovo_prediction[0], i % 3)
def test_ovo_string_y():
# Test that the OvO doesn't mess up the encoding of string labels
X = np.eye(4)
y = np.array(['a', 'b', 'c', 'd'])
ovo = OneVsOneClassifier(LinearSVC())
ovo.fit(X, y)
assert_array_equal(y, ovo.predict(X))
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_pairwise_indices():
clf_precomputed = svm.SVC(kernel='precomputed')
X, y = iris.data, iris.target
ovr_false = OneVsOneClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
ovr_false.fit(linear_kernel, y)
n_estimators = len(ovr_false.estimators_)
precomputed_indices = ovr_false.pairwise_indices_
for idx in precomputed_indices:
assert_equal(idx.shape[0] * n_estimators / (n_estimators - 1),
linear_kernel.shape[0])
def test_pairwise_attribute():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC()
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
assert_false(ovr_false._pairwise)
ovr_true = MultiClassClassifier(clf_precomputed)
assert_true(ovr_true._pairwise)
def test_pairwise_cross_val_score():
clf_precomputed = svm.SVC(kernel='precomputed')
clf_notprecomputed = svm.SVC(kernel='linear')
X, y = iris.data, iris.target
for MultiClassClassifier in [OneVsRestClassifier, OneVsOneClassifier]:
ovr_false = MultiClassClassifier(clf_notprecomputed)
ovr_true = MultiClassClassifier(clf_precomputed)
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(ovr_true, linear_kernel, y)
score_linear = cross_val_score(ovr_false, X, y)
assert_array_equal(score_precomputed, score_linear)
|
bsd-3-clause
|
Fireblend/scikit-learn
|
sklearn/semi_supervised/label_propagation.py
|
128
|
15312
|
# coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory of why they perform so well is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
  a dense matrix of size O(N^2). The KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
    max_iter : integer
        Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
    max_iter : integer
        Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
    max_iter : integer
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
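        # graph_laplacian(..., normed=True) returns the symmetrically
        # normalized Laplacian, roughly I - D^{-1/2} W D^{-1/2}; negating it
        # and zeroing the diagonal below therefore leaves (the off-diagonal
        # part of) the normalized affinity D^{-1/2} W D^{-1/2}, which is the
        # matrix the spreading iterations repeatedly multiply the label
        # distributions by.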
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
|
bsd-3-clause
|
EP-Guy/VisPe
|
vispe/Observation.py
|
1
|
1116
|
"""
Created May 13, 2016
Observation object for calculating satellite and star positions using SkyField.
@author: EP-Guy
"""
import numpy as np
import pandas as pd
from skyfield.api import load
class Observation:
"""Observation object for times and positions of an observer.
Observer is a tuple of strings (lat, lon).
Time may be a list of datetime objects.
"""
def __init__(self, observer, time_list):
self.earth = self._createearth()
self.ts = load.timescale()
self.time = self._createdatearray(time_list)
self.obs = self._createobs(observer)
@staticmethod
def _createearth():
"""Create SkyField earth object for efficiency."""
eph = load('de421.bsp')
return eph['earth']
def _createdatearray(self, time_list):
"""Create SkyField date array from list of datetimes.
Note: this does not properly handle leap seconds
"""
return self.ts.utc(time_list)
def _createobs(self, observer):
"""Create Earth topos object"""
return self.earth.topos(observer[0], observer[1])
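# Hypothetical usage sketch (the coordinates and timestamp below are made-up
# values; Skyfield expects timezone-aware datetimes for ``ts.utc``):
#
#     from datetime import datetime
#     from skyfield.api import utc
#
#     obs = Observation(('36.7 N', '119.4 W'),
#                       [datetime(2016, 5, 13, 12, 0, tzinfo=utc)])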
|
mit
|
justinfinkle/pydiffexp
|
pydiffexp/gnw/display.py
|
1
|
2570
|
from io import BytesIO
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
from nxpd import draw
from pydiffexp.plot import DEPlot
def get_graph(path):
"""
Get the digraph
:param path:
:return:
"""
# Read the data
net_df = pd.read_csv(path, sep='\t', header=None)
# Set edge colors
net_df[net_df == '+'] = 'green'
net_df[net_df == '-'] = 'red'
net_df.columns = ['source', 'target', 'color']
# Make a networkx diagram
dg = nx.from_pandas_dataframe(net_df, source='source', target='target', create_using=nx.DiGraph(),
edge_attr='color')
return dg
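# Hedged usage sketch (the file name below is hypothetical; the TSV is assumed
# to hold three columns -- source, target, '+'/'-' -- with no header row):
#
#     dg = get_graph('network_signed.tsv')
#     img = draw_net(dg, dpi=150)
#     plt.imshow(img)
#     plt.axis('off')
#     plt.show()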
def draw_net(g, dpi=300, **kwargs):
"""
    Draw the network diagram
:param g:
:param dpi:
:return:
"""
kwargs.setdefault('show', 'ipynb')
kwargs.setdefault('layout', 'neato')
# Set dpi
g.graph['dpi'] = dpi
# todo: if show=True this will return a string to the tmp location of the image
gviz = draw(g, **kwargs)
img = mpimg.imread(BytesIO(gviz.data))
return img
def draw_results(data: pd.DataFrame, perturb, titles, times=None, samey=True, g=None, axarr=None, **kwargs):
"""
    :param data:
    :param perturb:
    :param titles:
    :param times:
    :param samey:
    :param g:
    :param axarr:
    :return:
"""
idx = pd.IndexSlice
data.sort_index(axis=1, inplace=True)
draw_data = data.loc[:, idx[:, :, perturb, :]]
if times:
draw_data = draw_data.loc[:, idx[:, :, :, times]]
y_max = draw_data.values.max()
dep = DEPlot()
nodes = draw_data.index.values
n_axes = len(nodes)
show_net = (g is not None)
if show_net:
net_img = draw_net(g, **kwargs)
n_axes += 1
if axarr is None:
fig, axarr = plt.subplots(1, n_axes, figsize=(15, 5))
for ii, ax in enumerate(axarr.flatten()):
        if show_net and ii == 0:
ax.imshow(net_img)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.axis('off')
else:
dep.tsplot(draw_data.loc[nodes[ii-show_net]], ax=ax, subgroup='Time', legend=False,
mean_line_dict={'ls': '--'})
ax.set_ylabel('Normalized Expression')
if samey:
ax.set_ylim([0, y_max])
# ax.set_xlabel('')
            ax.set_title(nodes[ii-show_net])
if ii > 1:
ax.get_yaxis().set_visible(False)
if ii > 0:
ax.set_ylabel('')
return axarr
|
gpl-3.0
|
SpaceKatt/CSPLN
|
apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/lib/python2.7/matplotlib/animation.py
|
3
|
19948
|
# TODO:
# * Loop Delay is broken on GTKAgg. This is because source_remove() is not
# working as we want. PyGTK bug?
# * Documentation -- this will need a new section of the User's Guide.
# Both for Animations and just timers.
# - Also need to update http://www.scipy.org/Cookbook/Matplotlib/Animations
# * Blit
# * Currently broken with Qt4 for widgets that don't start on screen
# * Still a few edge cases that aren't working correctly
# * Can this integrate better with existing matplotlib animation artist flag?
# - If animated removes from default draw(), perhaps we could use this to
# simplify initial draw.
# * Example
# * Frameless animation - pure procedural with no loop
# * Need example that uses something like inotify or subprocess
# * Complex syncing examples
# * Movies
# * Library to make movies?
# * RC parameter for config?
# * Can blit be enabled for movies?
# * Need to consider event sources to allow clicking through multiple figures
import itertools
from matplotlib.cbook import iterable
from matplotlib import verbose
class Animation(object):
'''
This class wraps the creation of an animation using matplotlib. It is
only a base class which should be subclassed to provide needed behavior.
*fig* is the figure object that is used to get draw, resize, and any
other needed events.
*event_source* is a class that can run a callback when desired events
are generated, as well as be stopped and started. Examples include timers
(see :class:`TimedAnimation`) and file system notifications.
*blit* is a boolean that controls whether blitting is used to optimize
drawing.
'''
def __init__(self, fig, event_source=None, blit=False):
self._fig = fig
self._blit = blit
# These are the basics of the animation. The frame sequence represents
# information for each frame of the animation and depends on how the
# drawing is handled by the subclasses. The event source fires events
# that cause the frame sequence to be iterated.
self.frame_seq = self.new_frame_seq()
self.event_source = event_source
# Clear the initial frame
self._init_draw()
# Instead of starting the event source now, we connect to the figure's
# draw_event, so that we only start once the figure has been drawn.
self._first_draw_id = fig.canvas.mpl_connect('draw_event', self._start)
# Connect to the figure's close_event so that we don't continue to
# fire events and try to draw to a deleted figure.
self._close_id = self._fig.canvas.mpl_connect('close_event', self._stop)
if blit:
self._setup_blit()
def _start(self, *args):
'''
Starts interactive animation. Adds the draw frame command to the GUI
handler, calls show to start the event loop.
'''
# On start, we add our callback for stepping the animation and
# actually start the event_source. We also disconnect _start
# from the draw_events
self.event_source.add_callback(self._step)
self.event_source.start()
self._fig.canvas.mpl_disconnect(self._first_draw_id)
self._first_draw_id = None # So we can check on save
def _stop(self, *args):
# On stop we disconnect all of our events.
if self._blit:
self._fig.canvas.mpl_disconnect(self._resize_id)
self._fig.canvas.mpl_disconnect(self._close_id)
self.event_source.remove_callback(self._step)
self.event_source = None
def save(self, filename, fps=5, codec='mpeg4', clear_temp=True,
frame_prefix='_tmp'):
'''
Saves a movie file by drawing every frame.
*filename* is the output filename, eg :file:`mymovie.mp4`
*fps* is the frames per second in the movie
        *codec* is the codec to be used, if it is supported by the output method.
*clear_temp* specifies whether the temporary image files should be
deleted.
*frame_prefix* gives the prefix that should be used for individual
image files. This prefix will have a frame number (i.e. 0001) appended
when saving individual frames.
'''
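        # Hedged usage sketch (assumes ffmpeg is available on the PATH and
        # that ``anim`` is an already-constructed animation object):
        #
        #     anim.save('mymovie.mp4', fps=15)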
# Need to disconnect the first draw callback, since we'll be doing
# draws. Otherwise, we'll end up starting the animation.
if self._first_draw_id is not None:
self._fig.canvas.mpl_disconnect(self._first_draw_id)
reconnect_first_draw = True
else:
reconnect_first_draw = False
fnames = []
# Create a new sequence of frames for saved data. This is different
# from new_frame_seq() to give the ability to save 'live' generated
# frame information to be saved later.
# TODO: Right now, after closing the figure, saving a movie won't
# work since GUI widgets are gone. Either need to remove extra code
        # to allow for this non-existent use case or find a way to make it work.
for idx,data in enumerate(self.new_saved_frame_seq()):
#TODO: Need to see if turning off blit is really necessary
self._draw_next_frame(data, blit=False)
fname = '%s%04d.png' % (frame_prefix, idx)
fnames.append(fname)
verbose.report('Animation.save: saved frame %d to fname=%s'%(idx, fname), level='debug')
self._fig.savefig(fname)
self._make_movie(filename, fps, codec, frame_prefix)
#Delete temporary files
if clear_temp:
import os
verbose.report('Animation.save: clearing temporary fnames=%s'%str(fnames), level='debug')
for fname in fnames:
os.remove(fname)
# Reconnect signal for first draw if necessary
if reconnect_first_draw:
self._first_draw_id = self._fig.canvas.mpl_connect('draw_event',
self._start)
def ffmpeg_cmd(self, fname, fps, codec, frame_prefix):
# Returns the command line parameters for subprocess to use
# ffmpeg to create a movie
return ['ffmpeg', '-y', '-r', str(fps), '-b', '1800k', '-i',
'%s%%04d.png' % frame_prefix, fname]
def mencoder_cmd(self, fname, fps, codec, frame_prefix):
# Returns the command line parameters for subprocess to use
# mencoder to create a movie
return ['mencoder', 'mf://%s*.png' % frame_prefix, '-mf',
'type=png:fps=%d' % fps, '-ovc', 'lavc', '-lavcopts',
'vcodec=%s' % codec, '-oac', 'copy', '-o', fname]
def _make_movie(self, fname, fps, codec, frame_prefix, cmd_gen=None):
# Uses subprocess to call the program for assembling frames into a
# movie file. *cmd_gen* is a callable that generates the sequence
# of command line arguments from a few configuration options.
from subprocess import Popen, PIPE
if cmd_gen is None:
cmd_gen = self.ffmpeg_cmd
command = cmd_gen(fname, fps, codec, frame_prefix)
verbose.report('Animation._make_movie running command: %s'%' '.join(command))
proc = Popen(command, shell=False,
stdout=PIPE, stderr=PIPE)
proc.wait()
def _step(self, *args):
'''
Handler for getting events. By default, gets the next frame in the
sequence and hands the data off to be drawn.
'''
# Returns True to indicate that the event source should continue to
# call _step, until the frame sequence reaches the end of iteration,
# at which point False will be returned.
try:
framedata = self.frame_seq.next()
self._draw_next_frame(framedata, self._blit)
return True
except StopIteration:
return False
def new_frame_seq(self):
'Creates a new sequence of frame information.'
# Default implementation is just an iterator over self._framedata
return iter(self._framedata)
def new_saved_frame_seq(self):
'Creates a new sequence of saved/cached frame information.'
# Default is the same as the regular frame sequence
return self.new_frame_seq()
def _draw_next_frame(self, framedata, blit):
# Breaks down the drawing of the next frame into steps of pre- and
# post- draw, as well as the drawing of the frame itself.
self._pre_draw(framedata, blit)
self._draw_frame(framedata)
self._post_draw(framedata, blit)
def _init_draw(self):
# Initial draw to clear the frame. Also used by the blitting code
# when a clean base is required.
pass
def _pre_draw(self, framedata, blit):
# Perform any cleaning or whatnot before the drawing of the frame.
# This default implementation allows blit to clear the frame.
if blit:
self._blit_clear(self._drawn_artists, self._blit_cache)
def _draw_frame(self, framedata):
# Performs actual drawing of the frame.
raise NotImplementedError('Needs to be implemented by subclasses to'
' actually make an animation.')
def _post_draw(self, framedata, blit):
# After the frame is rendered, this handles the actual flushing of
# the draw, which can be a direct draw_idle() or make use of the
# blitting.
if blit and self._drawn_artists:
self._blit_draw(self._drawn_artists, self._blit_cache)
else:
self._fig.canvas.draw_idle()
# The rest of the code in this class is to facilitate easy blitting
def _blit_draw(self, artists, bg_cache):
# Handles blitted drawing, which renders only the artists given instead
# of the entire figure.
updated_ax = []
for a in artists:
# If we haven't cached the background for this axes object, do
# so now. This might not always be reliable, but it's an attempt
# to automate the process.
if a.axes not in bg_cache:
bg_cache[a.axes] = a.figure.canvas.copy_from_bbox(a.axes.bbox)
a.axes.draw_artist(a)
updated_ax.append(a.axes)
# After rendering all the needed artists, blit each axes individually.
for ax in set(updated_ax):
ax.figure.canvas.blit(ax.bbox)
def _blit_clear(self, artists, bg_cache):
# Get a list of the axes that need clearing from the artists that
# have been drawn. Grab the appropriate saved background from the
# cache and restore.
axes = set(a.axes for a in artists)
for a in axes:
a.figure.canvas.restore_region(bg_cache[a])
def _setup_blit(self):
# Setting up the blit requires: a cache of the background for the
# axes
self._blit_cache = dict()
self._drawn_artists = []
self._resize_id = self._fig.canvas.mpl_connect('resize_event',
self._handle_resize)
self._post_draw(None, self._blit)
def _handle_resize(self, *args):
# On resize, we need to disable the resize event handling so we don't
# get too many events. Also stop the animation events, so that
# we're paused. Reset the cache and re-init. Set up an event handler
# to catch once the draw has actually taken place.
self._fig.canvas.mpl_disconnect(self._resize_id)
self.event_source.stop()
self._blit_cache.clear()
self._init_draw()
self._resize_id = self._fig.canvas.mpl_connect('draw_event', self._end_redraw)
def _end_redraw(self, evt):
# Now that the redraw has happened, do the post draw flushing and
# blit handling. Then re-enable all of the original events.
self._post_draw(None, self._blit)
self.event_source.start()
self._fig.canvas.mpl_disconnect(self._resize_id)
self._resize_id = self._fig.canvas.mpl_connect('resize_event',
self._handle_resize)
class TimedAnimation(Animation):
'''
:class:`Animation` subclass that supports time-based animation, drawing
a new frame every *interval* milliseconds.
*repeat* controls whether the animation should repeat when the sequence
of frames is completed.
*repeat_delay* optionally adds a delay in milliseconds before repeating
the animation.
'''
def __init__(self, fig, interval=200, repeat_delay=None, repeat=True,
event_source=None, *args, **kwargs):
# Store the timing information
self._interval = interval
self._repeat_delay = repeat_delay
self.repeat = repeat
# If we're not given an event source, create a new timer. This permits
# sharing timers between animation objects for syncing animations.
if event_source is None:
event_source = fig.canvas.new_timer()
event_source.interval = self._interval
Animation.__init__(self, fig, event_source=event_source, *args, **kwargs)
def _step(self, *args):
'''
Handler for getting events.
'''
# Extends the _step() method for the Animation class. If
# Animation._step signals that it reached the end and we want to repeat,
# we refresh the frame sequence and return True. If _repeat_delay is
# set, change the event_source's interval to our loop delay and set the
# callback to one which will then set the interval back.
still_going = Animation._step(self, *args)
if not still_going and self.repeat:
if self._repeat_delay:
self.event_source.remove_callback(self._step)
self.event_source.add_callback(self._loop_delay)
self.event_source.interval = self._repeat_delay
self.frame_seq = self.new_frame_seq()
return True
else:
return still_going
def _stop(self, *args):
        # If we stop in the middle of a loop delay (which is relatively likely
        # given the potential pause here), remove the loop_delay callback as
        # well.
self.event_source.remove_callback(self._loop_delay)
Animation._stop(self)
def _loop_delay(self, *args):
# Reset the interval and change callbacks after the delay.
self.event_source.remove_callback(self._loop_delay)
self.event_source.interval = self._interval
self.event_source.add_callback(self._step)
class ArtistAnimation(TimedAnimation):
'''
Before calling this function, all plotting should have taken place
and the relevant artists saved.
    *artists* is a list, with each list entry a collection of artists that
    represent what needs to be enabled on each frame. These will be disabled
    for other frames.
'''
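    # Hedged usage sketch (the figure, data and styling are assumptions):
    #
    #     fig, ax = plt.subplots()
    #     x = np.linspace(0, 2 * np.pi, 100)
    #     frames = [ax.plot(x, np.sin(x + phase), 'b')   # each entry: artists
    #               for phase in np.linspace(0, 2 * np.pi, 30)]
    #     anim = ArtistAnimation(fig, frames, interval=50, blit=True)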
def __init__(self, fig, artists, *args, **kwargs):
# Internal list of artists drawn in the most recent frame.
self._drawn_artists = []
# Use the list of artists as the framedata, which will be iterated
# over by the machinery.
self._framedata = artists
TimedAnimation.__init__(self, fig, *args, **kwargs)
def _init_draw(self):
# Make all the artists involved in *any* frame invisible
axes = []
for f in self.new_frame_seq():
for artist in f:
artist.set_visible(False)
# Assemble a list of unique axes that need flushing
if artist.axes not in axes:
axes.append(artist.axes)
# Flush the needed axes
for ax in axes:
ax.figure.canvas.draw()
def _pre_draw(self, framedata, blit):
'''
Clears artists from the last frame.
'''
if blit:
# Let blit handle clearing
self._blit_clear(self._drawn_artists, self._blit_cache)
else:
# Otherwise, make all the artists from the previous frame invisible
for artist in self._drawn_artists:
artist.set_visible(False)
def _draw_frame(self, artists):
# Save the artists that were passed in as framedata for the other
# steps (esp. blitting) to use.
self._drawn_artists = artists
# Make all the artists from the current frame visible
for artist in artists:
artist.set_visible(True)
class FuncAnimation(TimedAnimation):
'''
Makes an animation by repeatedly calling a function *func*, passing in
(optional) arguments in *fargs*.
*frames* can be a generator, an iterable, or a number of frames.
*init_func* is a function used to draw a clear frame. If not given, the
results of drawing from the first item in the frames sequence will be
used.
'''
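    # Hedged usage sketch (the update function and data are assumptions):
    #
    #     fig, ax = plt.subplots()
    #     line, = ax.plot([], [])
    #
    #     def update(i):
    #         line.set_data(range(i), [v ** 2 for v in range(i)])
    #         return line,
    #
    #     anim = FuncAnimation(fig, update, frames=50, interval=100, blit=True)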
def __init__(self, fig, func, frames=None ,init_func=None, fargs=None,
save_count=None, **kwargs):
if fargs:
self._args = fargs
else:
self._args = ()
self._func = func
# Amount of framedata to keep around for saving movies. This is only
# used if we don't know how many frames there will be: in the case
# of no generator or in the case of a callable.
self.save_count = save_count
# Set up a function that creates a new iterable when needed. If nothing
# is passed in for frames, just use itertools.count, which will just
# keep counting from 0. A callable passed in for frames is assumed to
# be a generator. An iterable will be used as is, and anything else
# will be treated as a number of frames.
if frames is None:
self._iter_gen = itertools.count
elif callable(frames):
self._iter_gen = frames
elif iterable(frames):
self._iter_gen = lambda: iter(frames)
self.save_count = len(frames)
else:
self._iter_gen = lambda: iter(range(frames))
self.save_count = frames
        # If we were not given a save_count and are relying on the default,
        # set it to 100.
if self.save_count is None:
self.save_count = 100
self._init_func = init_func
# Needs to be initialized so the draw functions work without checking
self._save_seq = []
TimedAnimation.__init__(self, fig, **kwargs)
# Need to reset the saved seq, since right now it will contain data
# for a single frame from init, which is not what we want.
self._save_seq = []
def new_frame_seq(self):
# Use the generating function to generate a new frame sequence
return self._iter_gen()
def new_saved_frame_seq(self):
# Generate an iterator for the sequence of saved data. If there are
# no saved frames, generate a new frame sequence and take the first
# save_count entries in it.
if self._save_seq:
return iter(self._save_seq)
else:
return itertools.islice(self.new_frame_seq(), self.save_count)
def _init_draw(self):
# Initialize the drawing either using the given init_func or by
# calling the draw function with the first item of the frame sequence.
# For blitting, the init_func should return a sequence of modified
# artists.
if self._init_func is None:
self._draw_frame(self.new_frame_seq().next())
else:
self._drawn_artists = self._init_func()
def _draw_frame(self, framedata):
# Save the data for potential saving of movies.
self._save_seq.append(framedata)
# Make sure to respect save_count (keep only the last save_count around)
self._save_seq = self._save_seq[-self.save_count:]
# Call the func with framedata and args. If blitting is desired,
# func needs to return a sequence of any artists that were modified.
self._drawn_artists = self._func(framedata, *self._args)
|
gpl-3.0
|
giorgiop/scikit-learn
|
sklearn/cluster/spectral.py
|
25
|
18535
|
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of
        the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
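# --- Added usage sketch (illustrative; not part of the original module) ---
# Builds a symmetric, non-negative RBF affinity matrix for two well separated
# point clouds and partitions it with spectral_clustering.  The toy data and
# gamma value below are assumptions made for the demonstration only.
if __name__ == "__main__":
    import numpy as np
    from sklearn.metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(10, 2), rng.randn(10, 2) + 5])
    affinity_demo = rbf_kernel(X_demo, gamma=1.0)  # shape (20, 20), symmetric
    labels_demo = spectral_clustering(affinity_demo, n_clusters=2,
                                      random_state=0)
    print(labels_demo)  # two groups of ten samples each (label order may vary)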
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles in the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
    ----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float, default=1.0
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values mean
    very dissimilar elements, it can be transformed into a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
Where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None, n_jobs=1):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit`` "
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True,
                                            n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
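# --- Added usage sketch (illustrative; not part of the original module) ---
# Demonstrates the Notes section above: a Euclidean distance matrix is turned
# into a similarity matrix with the Gaussian kernel and fed to the estimator
# with ``affinity='precomputed'``.  ``delta`` and the toy data are assumptions.
if __name__ == "__main__":
    import numpy as np
    from sklearn.metrics.pairwise import euclidean_distances
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(10, 2), rng.randn(10, 2) + 5])
    dist_matrix = euclidean_distances(X_demo)
    delta = 1.0  # free parameter: width of the Gaussian kernel
    similarity = np.exp(-dist_matrix ** 2 / (2. * delta ** 2))
    model = SpectralClustering(n_clusters=2, affinity='precomputed',
                               random_state=0)
    print(model.fit(similarity).labels_)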
|
bsd-3-clause
|
jeremiedecock/fits-viewer
|
setup.py
|
1
|
4153
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# FITS viewer
# The MIT License
#
# Copyright (c) 2016 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Here is the procedure to submit updates to PyPI
# ===============================================
#
# 1. Register to PyPI:
#
# $ python3 setup.py register
#
# 2. Upload the source distribution:
#
# $ python3 setup.py sdist upload
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
from fitsviewer import __version__ as VERSION
# See : http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Multimedia :: Graphics :: Viewers',
'Topic :: Multimedia :: Graphics :: Graphics Conversion',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries']
# You can either specify manually the list of packages to include in the
# distribution or use "setuptools.find_packages()" to include them
# automatically with a recursive search (from the root directory of the
# project).
PACKAGES = find_packages()
#PACKAGES = ['fitsviewer']
# The following list contains all dependencies that Python will try to
# install with this project
#INSTALL_REQUIRES = ['pyserial >= 2.6', 'docutils >= 0.3']
INSTALL_REQUIRES = ['astropy', 'numpy', 'matplotlib', 'Pillow']
# Entry point can be used to create plugins or to automatically generate
# system commands to call specific functions.
# Syntax: "name_of_the_command_to_make = package.module:function".
#
# For more information: http://www.pythonhosted.org/setuptools/setuptools.html#automatic-script-creation
ENTRY_POINTS = {
'console_scripts': [
'fits2png = fitsviewer.utils.fits2png:main',
'png2fits = fitsviewer.utils.png2fits:main',
],
'gui_scripts': [
'fitsviewer = fitsviewer.gui.tk_matplotlib:main',
]
}
README_FILE = 'README.rst'
def get_long_description():
with open(README_FILE, 'r') as fd:
desc = fd.read()
return desc
setup(author='Jeremie DECOCK',
author_email='[email protected]',
maintainer='Jeremie DECOCK',
maintainer_email='[email protected]',
name='fits-viewer',
description='A lightweight FITS file viewer',
long_description=get_long_description(),
url='http://www.jdhp.org/',
download_url='http://www.jdhp.org/',# Where the package can be downloaded
entry_points=ENTRY_POINTS,
include_package_data=True, # Use the MANIFEST.in file
install_requires=INSTALL_REQUIRES,
classifiers=CLASSIFIERS,
#license='MIT license', # Useless if license is already in CLASSIFIERS
packages=PACKAGES,
version=VERSION)
|
mit
|
mikelj/h-store
|
graphs/eviction-amount-sf.py
|
4
|
3893
|
#!/usr/bin/env python
import os
import sys
import re
import logging
import fnmatch
import string
import argparse
import pylab
import numpy as np
import matplotlib.pyplot as plot
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MaxNLocator
from pprint import pprint,pformat
from options import *
import graphutil
import datautil
## ==============================================
## LOGGING CONFIGURATION
## ==============================================
LOG = logging.getLogger(__name__)
LOG_handler = logging.StreamHandler()
LOG_formatter = logging.Formatter(
fmt='%(asctime)s [%(funcName)s:%(lineno)03d] %(levelname)-5s: %(message)s',
datefmt='%m-%d-%Y %H:%M:%S'
)
LOG_handler.setFormatter(LOG_formatter)
LOG.addHandler(LOG_handler)
LOG.setLevel(logging.INFO)
## ==============================================
## CONFIGURATION
## ==============================================
dictR = {}
dictW = {}
def computeEvictionStats(dataFile):
colMap, csvData = datautil.getCSVData(dataFile)
if len(csvData) == 0: return
    pos = dataFile.rfind("/")
dataFile = dataFile[pos + 3:]
if len(csvData) == 0: return
if not dictR.has_key(dataFile):
dictR[dataFile] = []
if not dictW.has_key(dataFile):
dictW[dataFile] = []
for row in csvData:
read = int(row[colMap["ANTICACHE_BYTES_READ"]]) / 1024
write = int(row[colMap["ANTICACHE_BYTES_WRITTEN"]]) / 1024
dictR[dataFile].append(read)
dictW[dataFile].append(write)
print dataFile
print "read: %d" % read
print "write: %d" % write
print
# DEF
def draw_IO_graph(out_path):
fig = plot.figure()
#fig.set_size_inches(8,4.8)
ax = fig.add_subplot(111)
skew = ["2X", "4X", "8X"]
res3 = []
res4 = []
for tp in dictW:
if tp.find("F2") > 0:
res3.append(np.mean(dictR[tp]))
res4.append(np.mean(dictW[tp]))
print tp
print np.mean(dictR[tp])
for tp in dictW:
if tp.find("F8") < 0 and tp.find("F2") < 0:
res3.append(np.mean(dictR[tp]))
res4.append(np.mean(dictW[tp]))
print tp
print np.mean(dictR[tp])
for tp in dictW:
if tp.find("F8") > 0:
res3.append(np.mean(dictR[tp]))
res4.append(np.mean(dictW[tp]))
print tp
print np.mean(dictR[tp])
# \#topic ($K$) & 50 & 100 & 150 \\ \hline %\hline
# PMTLM & 9889.48 & 8966.57 & 8483.49 \\ %\hline
# EUTB & 4932.97 & 4778.50 & 4619.07 \\ %\hline
# COLD(C=100) & 5200.46 & {\bf 4350.95} & 4394.46 \\
x = [0.5,1,1.5]
ax.bar( [i-0.1 for i in x] ,res3,width=0.1,label='timestamps-read',hatch='|',color='g')
ax.bar( [i+0.0 for i in x] ,res4,width=0.1,label='timestamps-write',hatch='|',color='m')
ax.set_ylabel("Disk IO (MB)",fontsize=16, weight='bold')
#ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),ncol=4)
ax.set_xlim([0.2,1.8])
ax.set_ylim([0,1000])
ax.set_xticklabels(skew,fontsize=16)
ax.set_xlabel("Scale factor",fontsize=16, weight='bold')
ax.set_xticks([0.5,1,1.5])
#plt.show()
plot.savefig(out_path)
## ==============================================
## main
## ==============================================
if __name__ == '__main__':
matches = []
for root, dirnames, filenames in os.walk("./prime/voter-sf"):
for filename in fnmatch.filter(filenames, '*memory.csv'):
matches.append(os.path.join(root, filename))
map(computeEvictionStats, matches)
#for tp in dictR:
# print tp
# print "read: %d" % np.mean(dictR[tp])
# print "write: %d" % np.mean(dictW[tp])
#draw_IO_graph("ycsb-INF-IO.pdf")
    #draw_IO_graph("ycsb-T500-IO.pdf")
draw_IO_graph("voter-sf-IO.pdf")
## MAIN
|
gpl-3.0
|
xavierwu/scikit-learn
|
examples/linear_model/plot_multi_task_lasso_support.py
|
249
|
2211
|
#!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes
that features selected at one time point are selected for all
time points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
|
bsd-3-clause
|
sequana/sequana
|
sequana/modules_report/bamqc.py
|
1
|
4122
|
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <[email protected]>
# Dimitri Desvillechabrol <[email protected]>,
# <[email protected]>
# Rachel Legendre <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Report dedicated to BAM file
.. autosummary::
BAMQCModule
"""
import os
from sequana.lazy import pandas as pd
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.bamtools import SAMFlags
from sequana import BAM
from sequana.lazy import pylab
from sequana.utils.datatables_js import DataTable
__all__ = ['BAMQCModule']
class BAMQCModule(SequanaBaseModule):
"""Report dedicated to BAM file
::
from sequana import sequana_data
from sequana.modules_report.bamqc import BAMQCModule
filename = sequana_data("test.bam")
r = BAMQCModule(filename)
r.create_html("test.html")
# report/bam.html is now available
.. todo:: right now, the computation is performed in the class. Ideally,
we would like the computation to happen elsewhere, where a json is stored.
The json would be the input to this class.
"""
def __init__(self, bam_input, output_filename=None):
super().__init__()
self.bam_input = bam_input
self.title = "Bam Report"
self.create_report_content()
self.create_html(output_filename)
def create_report_content(self):
self.sections = list()
self.add_flag_section()
self.add_images_section()
def _computation(self):
self.bam = BAM(self.bam_input)
results = {}
results['alignment_count'] = len(self.bam)
# first, we store the flags
df = self.bam.get_flags_as_df().sum()
df = df.to_frame()
df.columns = ['counter']
sf = SAMFlags()
df['meaning'] = sf.get_meaning()
df = df[['meaning', 'counter']]
results['flags'] = df
        return results
        # The bar_flags and bar_mapq images are generated lazily in
        # add_images_section() below.
def add_flag_section(self):
data = self._computation()
df = data['flags']
datatable = DataTable(df, "flags", index=True)
datatable.datatable.datatable_options = {
'scrollX': '300px',
'pageLength': 15,
'scrollCollapse': 'true',
'dom': 'tB',
"paging": "false",
'buttons': ['copy', 'csv']}
js = datatable.create_javascript_function()
html_tab = datatable.create_datatable(float_format='%.3g')
html = ""
html += "{} {}".format(html_tab, js)
self.sections.append({
"name": "Flags information",
"anchor": "flags",
"content": html
})
def add_images_section(self):
style = "width:65%"
import pylab
pylab.ioff()
def plotter1(filename):
self.bam.plot_bar_flags(logy=True, filename=filename)
html1 = self.create_embedded_png(plotter1, "filename", style=style)
def plotter2(filename):
self.bam.plot_bar_flags(logy=False, filename=filename)
html2 = self.create_embedded_png(plotter2, "filename", style=style)
def plotter3(filename):
self.bam.plot_bar_mapq(filename=filename)
html3 = self.create_embedded_png(plotter3, "filename", style=style)
self.sections.append({
"name": "Image",
"anchor": "table",
"content": html1 + html2 + html3
})
|
bsd-3-clause
|
samuel1208/scikit-learn
|
sklearn/datasets/mlcomp.py
|
289
|
3855
|
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
    """Load a dataset as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, the
    loader will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
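# --- Added usage sketch (illustrative; the dataset name and root path below
# are assumptions, not bundled with this module) ---
# Loads the 'train' portion of a document-classification dataset that was
# downloaded from mlcomp.org and unzipped under a local root folder.
if __name__ == "__main__":
    news_train = load_mlcomp('20news-18828', 'train',
                             mlcomp_root='/path/to/mlcomp_datasets')
    print(news_train.target_names)  # Bunch attribute, see the docstring above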
|
bsd-3-clause
|
eickenberg/scikit-learn
|
sklearn/lda.py
|
1
|
9529
|
"""
The :mod:`sklearn.lda` module implements Linear Discriminant Analysis (LDA).
"""
from __future__ import print_function
# Authors: Matthieu Perrot
# Mathieu Blondel
import warnings
import numpy as np
from scipy import linalg
from .base import BaseEstimator, ClassifierMixin, TransformerMixin
from .utils.extmath import logsumexp
from .utils import check_array, check_X_y
__all__ = ['LDA']
class LDA(BaseEstimator, ClassifierMixin, TransformerMixin):
"""
Linear Discriminant Analysis (LDA)
A classifier with a linear decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that
all classes share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality
of the input, by projecting it to the most discriminative
directions.
Parameters
----------
    n_components : int
Number of components (< n_classes - 1) for dimensionality reduction
priors : array, optional, shape = [n_classes]
Priors on classes
Attributes
----------
`coef_` : array-like, shape = [rank, n_classes - 1]
Coefficients of the features in the linear decision
function. rank is min(rank_features, n_classes) where
rank_features is the dimensionality of the spaces spanned
by the features (i.e. n_features excluding redundant features).
`covariance_` : array-like, shape = [n_features, n_features]
Covariance matrix (shared by all classes).
`means_` : array-like, shape = [n_classes, n_features]
Class means.
`priors_` : array-like, shape = [n_classes]
Class priors (sum to 1).
`scalings_` : array-like, shape = [rank, n_classes - 1]
Scaling of the features in the space spanned by the class
centroids.
    `xbar_` : array-like, shape = [n_features]
Overall mean.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
"""
def __init__(self, n_components=None, priors=None):
self.n_components = n_components
self.priors = np.asarray(priors) if priors is not None else None
if self.priors is not None:
if (self.priors < 0).any():
raise ValueError('priors must be non-negative')
if self.priors.sum() != 1:
                warnings.warn("the priors do not sum to 1; renormalizing",
                              UserWarning)
self.priors = self.priors / self.priors.sum()
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""
Fit the LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariance : boolean
If True the covariance matrix (shared by all classes) is computed
and stored in `self.covariance_` attribute.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
# Group means n_classes*n_features matrix
means = []
Xc = []
cov = None
if store_covariance:
cov = np.zeros((n_features, n_features))
for ind in range(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
# centered group data
Xgc = Xg - meang
Xc.append(Xgc)
if store_covariance:
cov += np.dot(Xgc.T, Xgc)
if store_covariance:
cov /= (n_samples - n_classes)
self.covariance_ = cov
self.means_ = np.asarray(means)
Xc = np.concatenate(Xc, axis=0)
# ----------------------------
        # 1) within (univariate) scaling by the within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# ----------------------------
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
## ----------------------------
## 3) Between variance scaling
# Overall mean
xbar = np.dot(self.priors_, self.means_)
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(means - xbar).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use svd to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
# compose the scalings
self.scalings_ = np.dot(scalings, V.T[:, :rank])
self.xbar_ = xbar
# weight vectors / centroids
self.coef_ = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(self.coef_ ** 2, axis=1) +
np.log(self.priors_))
return self
def _decision_function(self, X):
X = check_array(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scalings_)
return np.dot(X, self.coef_.T) + self.intercept_
def decision_function(self, X):
"""
This function returns the decision function values related to each
class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def transform(self, X):
"""
Project the data so as to maximize class separation (large separation
between projected class means and small variance within each class).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
"""
X = check_array(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scalings_)
n_comp = X.shape[1] if self.n_components is None else self.n_components
return np.dot(X, self.coef_[:n_comp].T)
def predict(self, X):
"""
This function does classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""
This function returns posterior log-probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self._decision_function(X)
loglikelihood = (values - values.max(axis=1)[:, np.newaxis])
normalization = logsumexp(loglikelihood, axis=1)
return loglikelihood - normalization[:, np.newaxis]
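# --- Added usage sketch (illustrative; not part of the original module) ---
# Shows the dimensionality-reduction use mentioned in the class docstring:
# the toy data from the Examples section is projected onto the single
# discriminative direction available for two classes.
if __name__ == "__main__":
    X_demo = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y_demo = np.array([1, 1, 1, 2, 2, 2])
    lda_demo = LDA(n_components=1).fit(X_demo, y_demo)
    print(lda_demo.transform(X_demo).shape)  # (6, 1): one component kept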
|
bsd-3-clause
|
ndingwall/scikit-learn
|
examples/linear_model/plot_polynomial_interpolation.py
|
168
|
2088
|
#!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
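# Added illustration (not in the original example): the Vandermonde matrix from
# the docstring can be built directly with np.vander; PolynomialFeatures below
# generates the same pseudo-feature columns (1, x, x**2, ...) for Ridge.
_x_demo = np.array([1., 2., 3.])
_vander_demo = np.vander(_x_demo, N=3, increasing=True)  # columns: 1, x, x**2
assert _vander_demo.shape == (3, 3)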
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
colors = ['teal', 'yellowgreen', 'gold']
lw = 2
plt.plot(x_plot, f(x_plot), color='cornflowerblue', linewidth=lw,
label="ground truth")
plt.scatter(x, y, color='navy', s=30, marker='o', label="training points")
for count, degree in enumerate([3, 4, 5]):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, color=colors[count], linewidth=lw,
label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
|
bsd-3-clause
|
darionyaphet/flink
|
flink-python/pyflink/fn_execution/beam/beam_coder_impl_slow.py
|
3
|
21307
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import pickle
import struct
from typing import Any
from typing import Generator
from typing import List
import pyarrow as pa
from apache_beam.coders.coder_impl import StreamCoderImpl, create_InputStream, create_OutputStream
from pyflink.fn_execution.ResettableIO import ResettableIO
from pyflink.table.types import Row
from pyflink.table.utils import pandas_to_arrow, arrow_to_pandas
class FlattenRowCoderImpl(StreamCoderImpl):
def __init__(self, field_coders):
self._field_coders = field_coders
self._field_count = len(field_coders)
self._leading_complete_bytes_num = self._field_count // 8
self._remaining_bits_num = self._field_count % 8
self.null_mask_search_table = self.generate_null_mask_search_table()
self.null_byte_search_table = (0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01)
self.data_out_stream = create_OutputStream()
@staticmethod
def generate_null_mask_search_table():
"""
        Each bit of one byte indicates whether the column at the corresponding position is None,
        e.g. 0x84 means that the first and the sixth columns are None.
"""
null_mask = []
for b in range(256):
every_num_null_mask = [(b & 0x80) > 0, (b & 0x40) > 0, (b & 0x20) > 0, (b & 0x10) > 0,
(b & 0x08) > 0, (b & 0x04) > 0, (b & 0x02) > 0, (b & 0x01) > 0]
null_mask.append(tuple(every_num_null_mask))
return tuple(null_mask)
def encode_to_stream(self, iter_value, out_stream, nested):
field_coders = self._field_coders
data_out_stream = self.data_out_stream
for value in iter_value:
self._write_null_mask(value, data_out_stream)
for i in range(self._field_count):
item = value[i]
if item is not None:
field_coders[i].encode_to_stream(item, data_out_stream, nested)
out_stream.write_var_int64(data_out_stream.size())
out_stream.write(data_out_stream.get())
data_out_stream._clear()
def decode_from_stream(self, in_stream, nested):
while in_stream.size() > 0:
in_stream.read_var_int64()
yield self._decode_one_row_from_stream(in_stream, nested)
def _decode_one_row_from_stream(self, in_stream: create_InputStream, nested: bool) -> List:
null_mask = self._read_null_mask(in_stream)
return [None if null_mask[idx] else self._field_coders[idx].decode_from_stream(
in_stream, nested) for idx in range(0, self._field_count)]
def _write_null_mask(self, value, out_stream):
field_pos = 0
null_byte_search_table = self.null_byte_search_table
remaining_bits_num = self._remaining_bits_num
for _ in range(self._leading_complete_bytes_num):
b = 0x00
for i in range(0, 8):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
field_pos += 8
out_stream.write_byte(b)
if remaining_bits_num:
b = 0x00
for i in range(remaining_bits_num):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
out_stream.write_byte(b)
def _read_null_mask(self, in_stream):
null_mask = []
null_mask_search_table = self.null_mask_search_table
remaining_bits_num = self._remaining_bits_num
for _ in range(self._leading_complete_bytes_num):
b = in_stream.read_byte()
null_mask.extend(null_mask_search_table[b])
if remaining_bits_num:
b = in_stream.read_byte()
null_mask.extend(null_mask_search_table[b][0:remaining_bits_num])
return null_mask
def __repr__(self):
return 'FlattenRowCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
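# --- Added illustration (not part of the original module) ---
# The null-mask byte described in generate_null_mask_search_table, computed by
# hand: for an 8-field row whose first and sixth fields are None, the mask byte
# is 0x80 | 0x04 == 0x84, matching the docstring example above.
if __name__ == "__main__":
    _row = [None, 1, 2, 3, 4, None, 6, 7]
    _byte_table = (0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01)
    _mask_byte = 0x00
    for _pos, _field in enumerate(_row):
        if _field is None:
            _mask_byte |= _byte_table[_pos]
    assert _mask_byte == 0x84
    print(hex(_mask_byte))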
class RowCoderImpl(FlattenRowCoderImpl):
def __init__(self, field_coders):
super(RowCoderImpl, self).__init__(field_coders)
def encode_to_stream(self, value, out_stream, nested):
field_coders = self._field_coders
self._write_null_mask(value, out_stream)
for i in range(self._field_count):
item = value[i]
if item is not None:
field_coders[i].encode_to_stream(item, out_stream, nested)
def decode_from_stream(self, in_stream, nested):
return Row(*self._decode_one_row_from_stream(in_stream, nested))
def __repr__(self):
return 'RowCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
class TableFunctionRowCoderImpl(StreamCoderImpl):
def __init__(self, flatten_row_coder):
self._flatten_row_coder = flatten_row_coder
self._field_count = flatten_row_coder._field_count
def encode_to_stream(self, iter_value, out_stream, nested):
for value in iter_value:
if value:
if self._field_count == 1:
value = self._create_tuple_result(value)
self._flatten_row_coder.encode_to_stream(value, out_stream, nested)
out_stream.write_var_int64(1)
out_stream.write_byte(0x00)
def decode_from_stream(self, in_stream, nested):
return self._flatten_row_coder.decode_from_stream(in_stream, nested)
@staticmethod
def _create_tuple_result(value: List) -> Generator:
for result in value:
yield (result,)
def __repr__(self):
return 'TableFunctionRowCoderImpl[%s]' % repr(self._flatten_row_coder)
class ArrayCoderImpl(StreamCoderImpl):
def __init__(self, elem_coder):
self._elem_coder = elem_coder
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(len(value))
for elem in value:
if elem is None:
out_stream.write_byte(False)
else:
out_stream.write_byte(True)
self._elem_coder.encode_to_stream(elem, out_stream, nested)
def decode_from_stream(self, in_stream, nested):
size = in_stream.read_bigendian_int32()
elements = [self._elem_coder.decode_from_stream(in_stream, nested)
if in_stream.read_byte() else None for _ in range(size)]
return elements
def __repr__(self):
return 'ArrayCoderImpl[%s]' % repr(self._elem_coder)
class PickledBytesCoderImpl(StreamCoderImpl):
def __init__(self):
self.field_coder = BinaryCoderImpl()
def encode_to_stream(self, value, out_stream, nested):
coded_data = pickle.dumps(value)
self.field_coder.encode_to_stream(coded_data, out_stream, nested)
def decode_from_stream(self, in_stream, nested):
return self._decode_one_value_from_stream(in_stream, nested)
def _decode_one_value_from_stream(self, in_stream: create_InputStream, nested):
real_data = self.field_coder.decode_from_stream(in_stream, nested)
value = pickle.loads(real_data)
return value
def __repr__(self) -> str:
return 'PickledBytesCoderImpl[%s]' % str(self.field_coder)
class DataStreamStatelessMapCoderImpl(StreamCoderImpl):
def __init__(self, field_coder):
self._field_coder = field_coder
self.data_out_stream = create_OutputStream()
def encode_to_stream(self, iter_value, stream,
nested): # type: (Any, create_OutputStream, bool) -> None
data_out_stream = self.data_out_stream
for value in iter_value:
self._field_coder.encode_to_stream(value, data_out_stream, nested)
stream.write_var_int64(data_out_stream.size())
stream.write(data_out_stream.get())
data_out_stream._clear()
def decode_from_stream(self, stream, nested): # type: (create_InputStream, bool) -> Any
while stream.size() > 0:
stream.read_var_int64()
yield self._field_coder.decode_from_stream(stream, nested)
def __repr__(self):
return 'DataStreamStatelessMapCoderImpl[%s]' % repr(self._field_coder)
class DataStreamStatelessFlatMapCoderImpl(StreamCoderImpl):
def __init__(self, field_coder):
self._field_coder = field_coder
def encode_to_stream(self, iter_value, stream,
nested): # type: (Any, create_OutputStream, bool) -> None
for value in iter_value:
self._field_coder.encode_to_stream(value, stream, nested)
def decode_from_stream(self, stream, nested):
return self._field_coder.decode_from_stream(stream, nested)
def __str__(self) -> str:
return 'DataStreamStatelessFlatMapCoderImpl[%s]' % repr(self._field_coder)
class MapCoderImpl(StreamCoderImpl):
def __init__(self, key_coder, value_coder):
self._key_coder = key_coder
self._value_coder = value_coder
def encode_to_stream(self, map_value, out_stream, nested):
out_stream.write_bigendian_int32(len(map_value))
for key in map_value:
self._key_coder.encode_to_stream(key, out_stream, nested)
value = map_value[key]
if value is None:
out_stream.write_byte(True)
else:
out_stream.write_byte(False)
self._value_coder.encode_to_stream(map_value[key], out_stream, nested)
def decode_from_stream(self, in_stream, nested):
size = in_stream.read_bigendian_int32()
map_value = {}
for _ in range(size):
key = self._key_coder.decode_from_stream(in_stream, nested)
is_null = in_stream.read_byte()
if is_null:
map_value[key] = None
else:
value = self._value_coder.decode_from_stream(in_stream, nested)
map_value[key] = value
return map_value
def __repr__(self):
return 'MapCoderImpl[%s]' % ' : '.join([repr(self._key_coder), repr(self._value_coder)])
class BigIntCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int64(value)
def decode_from_stream(self, in_stream, nested):
return in_stream.read_bigendian_int64()
class TinyIntCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write(struct.pack('b', value))
def decode_from_stream(self, in_stream, nested):
return struct.unpack('b', in_stream.read(1))[0]
class SmallIntCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write(struct.pack('>h', value))
def decode_from_stream(self, in_stream, nested):
return struct.unpack('>h', in_stream.read(2))[0]
class IntCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(value)
def decode_from_stream(self, in_stream, nested):
return in_stream.read_bigendian_int32()
class BooleanCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_byte(value)
def decode_from_stream(self, in_stream, nested):
return not not in_stream.read_byte()
class FloatCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write(struct.pack('>f', value))
def decode_from_stream(self, in_stream, nested):
return struct.unpack('>f', in_stream.read(4))[0]
class DoubleCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_double(value)
def decode_from_stream(self, in_stream, nested):
return in_stream.read_bigendian_double()
class DecimalCoderImpl(StreamCoderImpl):
def __init__(self, precision, scale):
self.context = decimal.Context(prec=precision)
self.scale_format = decimal.Decimal(10) ** -scale
def encode_to_stream(self, value, out_stream, nested):
user_context = decimal.getcontext()
decimal.setcontext(self.context)
value = value.quantize(self.scale_format)
bytes_value = str(value).encode("utf-8")
out_stream.write_bigendian_int32(len(bytes_value))
out_stream.write(bytes_value, False)
decimal.setcontext(user_context)
def decode_from_stream(self, in_stream, nested):
user_context = decimal.getcontext()
decimal.setcontext(self.context)
size = in_stream.read_bigendian_int32()
value = decimal.Decimal(in_stream.read(size).decode("utf-8")).quantize(self.scale_format)
decimal.setcontext(user_context)
return value
class BigDecimalCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, stream, nested):
bytes_value = str(value).encode("utf-8")
stream.write_bigendian_int32(len(bytes_value))
stream.write(bytes_value, False)
def decode_from_stream(self, stream, nested):
size = stream.read_bigendian_int32()
value = decimal.Decimal(stream.read(size).decode("utf-8"))
return value
class TupleCoderImpl(StreamCoderImpl):
def __init__(self, field_coders):
self._field_coders = field_coders
self._field_count = len(field_coders)
def encode_to_stream(self, value, out_stream, nested):
field_coders = self._field_coders
for i in range(self._field_count):
field_coders[i].encode_to_stream(value[i], out_stream, nested)
def decode_from_stream(self, stream, nested):
decoded_list = [field_coder.decode_from_stream(stream, nested)
for field_coder in self._field_coders]
return (*decoded_list,)
def __repr__(self) -> str:
return 'TupleCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
class BinaryCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(len(value))
out_stream.write(value, False)
def decode_from_stream(self, in_stream, nested):
size = in_stream.read_bigendian_int32()
return in_stream.read(size)
class CharCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
bytes_value = value.encode("utf-8")
out_stream.write_bigendian_int32(len(bytes_value))
out_stream.write(bytes_value, False)
def decode_from_stream(self, in_stream, nested):
size = in_stream.read_bigendian_int32()
return in_stream.read(size).decode("utf-8")
class DateCoderImpl(StreamCoderImpl):
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(self.date_to_internal(value))
def decode_from_stream(self, in_stream, nested):
value = in_stream.read_bigendian_int32()
return self.internal_to_date(value)
def date_to_internal(self, d):
return d.toordinal() - self.EPOCH_ORDINAL
def internal_to_date(self, v):
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
class TimeCoderImpl(StreamCoderImpl):
def encode_to_stream(self, value, out_stream, nested):
out_stream.write_bigendian_int32(self.time_to_internal(value))
def decode_from_stream(self, in_stream, nested):
value = in_stream.read_bigendian_int32()
return self.internal_to_time(value)
def time_to_internal(self, t):
milliseconds = (t.hour * 3600000
+ t.minute * 60000
+ t.second * 1000
+ t.microsecond // 1000)
return milliseconds
def internal_to_time(self, v):
seconds, milliseconds = divmod(v, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, milliseconds * 1000)
class TimestampCoderImpl(StreamCoderImpl):
def __init__(self, precision):
self.precision = precision
def is_compact(self):
return self.precision <= 3
def encode_to_stream(self, value, out_stream, nested):
milliseconds, nanoseconds = self.timestamp_to_internal(value)
if self.is_compact():
assert nanoseconds == 0
out_stream.write_bigendian_int64(milliseconds)
else:
out_stream.write_bigendian_int64(milliseconds)
out_stream.write_bigendian_int32(nanoseconds)
def decode_from_stream(self, in_stream, nested):
if self.is_compact():
milliseconds = in_stream.read_bigendian_int64()
nanoseconds = 0
else:
milliseconds = in_stream.read_bigendian_int64()
nanoseconds = in_stream.read_bigendian_int32()
return self.internal_to_timestamp(milliseconds, nanoseconds)
def timestamp_to_internal(self, timestamp):
seconds = int(timestamp.replace(tzinfo=datetime.timezone.utc).timestamp())
microseconds_of_second = timestamp.microsecond
milliseconds = seconds * 1000 + microseconds_of_second // 1000
nanoseconds = microseconds_of_second % 1000 * 1000
return milliseconds, nanoseconds
def internal_to_timestamp(self, milliseconds, nanoseconds):
second, microsecond = (milliseconds // 1000,
milliseconds % 1000 * 1000 + nanoseconds // 1000)
return datetime.datetime.utcfromtimestamp(second).replace(microsecond=microsecond)
class LocalZonedTimestampCoderImpl(TimestampCoderImpl):
def __init__(self, precision, timezone):
super(LocalZonedTimestampCoderImpl, self).__init__(precision)
self.timezone = timezone
def internal_to_timestamp(self, milliseconds, nanoseconds):
return self.timezone.localize(
super(LocalZonedTimestampCoderImpl, self).internal_to_timestamp(
milliseconds, nanoseconds))
class ArrowCoderImpl(StreamCoderImpl):
def __init__(self, schema, row_type, timezone):
self._schema = schema
self._field_types = row_type.field_types()
self._timezone = timezone
self._resettable_io = ResettableIO()
self._batch_reader = ArrowCoderImpl._load_from_stream(self._resettable_io)
self._batch_writer = pa.RecordBatchStreamWriter(self._resettable_io, self._schema)
self.data_out_stream = create_OutputStream()
self._resettable_io.set_output_stream(self.data_out_stream)
def encode_to_stream(self, iter_cols, out_stream, nested):
data_out_stream = self.data_out_stream
for cols in iter_cols:
self._batch_writer.write_batch(
pandas_to_arrow(self._schema, self._timezone, self._field_types, cols))
out_stream.write_var_int64(data_out_stream.size())
out_stream.write(data_out_stream.get())
data_out_stream._clear()
def decode_from_stream(self, in_stream, nested):
while in_stream.size() > 0:
yield self._decode_one_batch_from_stream(in_stream)
@staticmethod
def _load_from_stream(stream):
reader = pa.ipc.open_stream(stream)
for batch in reader:
yield batch
def _decode_one_batch_from_stream(self, in_stream: create_InputStream) -> List:
self._resettable_io.set_input_bytes(in_stream.read_all(True))
# there is only one arrow batch in the underlying input stream
return arrow_to_pandas(self._timezone, self._field_types, [next(self._batch_reader)])
def __repr__(self):
return 'ArrowCoderImpl[%s]' % self._schema
class PassThroughLengthPrefixCoderImpl(StreamCoderImpl):
def __init__(self, value_coder):
self._value_coder = value_coder
def encode_to_stream(self, value, out: create_OutputStream, nested: bool) -> Any:
self._value_coder.encode_to_stream(value, out, nested)
def decode_from_stream(self, in_stream: create_InputStream, nested: bool) -> Any:
return self._value_coder.decode_from_stream(in_stream, nested)
def get_estimated_size_and_observables(self, value: Any, nested=False):
return 0, []
def __repr__(self):
return 'PassThroughLengthPrefixCoderImpl[%s]' % self._value_coder
|
apache-2.0
|
LiaoPan/scikit-learn
|
examples/linear_model/plot_ransac.py
|
250
|
1673
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
|
bsd-3-clause
|
stylianos-kampakis/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
127
|
40813
|
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using an SVC
    If binary is True, restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
            # i.e., the percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
# Make sure that roc_curve returns a curve starting at 0 and ending at
# 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test precision-recall curve and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works for various inputs
# without ties: basic check with increasing label space size and decreasing
# scores (a worked instance of the closed form is given after this check)
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
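# Worked instance of the closed-form expression above (illustrative values,
# not part of the original suite): with 4 labels, strictly decreasing scores
# and the two relevant labels at positions 1 and 2 (ranks 2 and 3), the
# precisions at the relevant ranks are 1/2 and 2/3, so the LRAP is
# (1/2 + 2/3) / 2 = 7/12.
def test_lrap_decreasing_score_worked_example():
    y_true = np.array([[0, 1, 1, 0]])
    y_score = np.array([[3, 2, 1, 0]])
    assert_almost_equal(label_ranking_average_precision_score(y_true, y_score),
                        (1. / 2 + 2. / 3) / 2)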
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account,
# e.g. two labels tied for rank 1 are both assigned rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Count the number of relevant labels with a better rank
# (i.e. a smaller rank value).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
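# Illustrative tie-handling case (hypothetical values, not in the original
# suite): with relevant labels 0 and 2 and scores [0.5, 0.5, 0.25], the two
# tied top scores both receive the pessimistic rank 2, so the precisions at
# the relevant labels are 1/2 and 2/3. Both implementations should agree.
def test_my_lrap_tie_handling_example():
    y_true = np.array([[1, 0, 1]])
    y_score = np.array([[0.5, 0.5, 0.25]])
    expected = (1. / 2 + 2. / 3) / 2
    assert_almost_equal(_my_lrap(y_true, y_score), expected)
    assert_almost_equal(
        label_ranking_average_precision_score(y_true, y_score), expected)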
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
|
bsd-3-clause
|
josephcslater/scipy
|
scipy/ndimage/fourier.py
|
25
|
11866
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy
from . import _ni_support
from . import _nd_image
__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
'fourier_shift']
def _get_output_fourier(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128,
numpy.float32]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.float64)
return_value = output
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128,
numpy.float32, numpy.float64]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
return_value = output
else:
if output.shape != input.shape:
raise RuntimeError("output shape not correct")
return_value = None
return output, return_value
def _get_output_fourier_complex(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.complex128)
return_value = output
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
return_value = output
else:
if output.shape != input.shape:
raise RuntimeError("output shape not correct")
return_value = None
return output, return_value
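# Hypothetical helper (not part of the original module), kept only as a
# sketch of how the two private functions above are meant to be called: the
# caller may pass None to get a freshly allocated array, a dtype to choose
# the output type, or a pre-allocated array, in which case `return_value` is
# None and the public functions return None after filling it in place.
def _output_fourier_usage_sketch():
    data = numpy.zeros((4, 4), dtype=numpy.float64)
    out, ret = _get_output_fourier(None, data)             # fresh float64 array
    assert ret is out
    out, ret = _get_output_fourier(numpy.complex64, data)  # fresh complex64 array
    assert out.dtype == numpy.complex64
    buf = numpy.zeros((4, 4), dtype=numpy.float64)
    out, ret = _get_output_fourier(buf, data)              # in-place: ret is None
    assert ret is None and out is buf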
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
"""
Multi-dimensional Gaussian fourier filter.
The array is multiplied with the fourier transform of a Gaussian
kernel.
Parameters
----------
input : array_like
The input array.
sigma : float or sequence
The sigma of the Gaussian kernel. If a float, `sigma` is the same for
all axes. If a sequence, `sigma` has to contain one value for each
axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_gaussian : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_gaussian(input_, sigma=4)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
sigmas = numpy.asarray(sigmas, dtype=numpy.float64)
if not sigmas.flags.contiguous:
sigmas = sigmas.copy()
_nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
return return_value
def fourier_uniform(input, size, n=-1, axis=-1, output=None):
"""
Multi-dimensional uniform fourier filter.
The array is multiplied with the fourier transform of a box of given
size.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_uniform : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_uniform(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype=numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 1)
return return_value
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
"""
Multi-dimensional ellipsoid fourier filter.
The array is multiplied with the fourier transform of an ellipsoid of
given sizes.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_ellipsoid : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
Notes
-----
This function is implemented for arrays of rank 1, 2, or 3.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_ellipsoid(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype=numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 2)
return return_value
def fourier_shift(input, shift, n=-1, axis=-1, output=None):
"""
Multi-dimensional fourier shift filter.
The array is multiplied with the fourier transform of a shift operation.
Parameters
----------
input : array_like
The input array.
shift : float or sequence
The shift to apply along each axis.
If a float, `shift` is the same for all axes. If a sequence, `shift`
has to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of shifting the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_shift : ndarray or None
The shifted input. If `output` is given as a parameter, None is
returned.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> import numpy.fft
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_shift(input_, shift=200)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier_complex(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
shifts = _ni_support._normalize_sequence(shift, input.ndim)
shifts = numpy.asarray(shifts, dtype=numpy.float64)
if not shifts.flags.contiguous:
shifts = shifts.copy()
_nd_image.fourier_shift(input, shifts, n, axis, output)
return return_value
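# Illustrative round trip (hypothetical helper, not part of the original
# module): for an integer shift on a 1-D signal, applying the shift filter in
# the frequency domain and inverting the FFT should match numpy.roll up to
# floating point error.
def _fourier_shift_roll_sketch():
    x = numpy.arange(8, dtype=numpy.float64)
    shifted = numpy.fft.ifft(fourier_shift(numpy.fft.fft(x), 2)).real
    assert numpy.allclose(shifted, numpy.roll(x, 2))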
|
bsd-3-clause
|
pompiduskus/scikit-learn
|
sklearn/cluster/tests/test_spectral.py
|
262
|
7954
|
"""Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test discretize using a noisy assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
|
bsd-3-clause
|
colour-science/colour
|
colour/utilities/__init__.py
|
1
|
5150
|
# -*- coding: utf-8 -*-
import sys
from .data_structures import (Lookup, Structure, CaseInsensitiveMapping,
LazyCaseInsensitiveMapping)
from .common import (
handle_numpy_errors, ignore_numpy_errors, raise_numpy_errors,
print_numpy_errors, warn_numpy_errors, ignore_python_warnings, batch,
disable_multiprocessing, multiprocessing_pool, is_matplotlib_installed,
is_networkx_installed, is_openimageio_installed, is_pandas_installed,
is_tqdm_installed, required, is_iterable, is_string, is_numeric,
is_integer, is_sibling, filter_kwargs, filter_mapping, first_item,
get_domain_range_scale, set_domain_range_scale, domain_range_scale,
to_domain_1, to_domain_10, to_domain_100, to_domain_degrees, to_domain_int,
from_range_1, from_range_10, from_range_100, from_range_degrees,
from_range_int, copy_definition, validate_method)
from .verbose import (
ColourWarning, ColourUsageWarning, ColourRuntimeWarning, message_box,
show_warning, warning, runtime_warning, usage_warning, filter_warnings,
suppress_warnings, numpy_print_options, ANCILLARY_COLOUR_SCIENCE_PACKAGES,
ANCILLARY_RUNTIME_PACKAGES, ANCILLARY_DEVELOPMENT_PACKAGES,
ANCILLARY_EXTRAS_PACKAGES, describe_environment)
from .array import (as_array, as_int_array, as_float_array, as_numeric, as_int,
as_float, set_float_precision, set_int_precision,
as_namedtuple, closest_indexes, closest, interval,
is_uniform, in_array, tstack, tsplit, row_as_diagonal,
orient, centroid, fill_nan, ndarray_write, zeros, ones,
full, index_along_last_axis)
from ..algebra.common import (normalise_maximum, vector_dot, matrix_dot,
linear_conversion, linstep_function)
from .metrics import metric_mse, metric_psnr
from colour.utilities.deprecation import ModuleAPI, build_API_changes
from colour.utilities.documentation import is_documentation_building
__all__ = [
'Lookup', 'Structure', 'CaseInsensitiveMapping',
'LazyCaseInsensitiveMapping'
]
__all__ += [
'handle_numpy_errors', 'ignore_numpy_errors', 'raise_numpy_errors',
'print_numpy_errors', 'warn_numpy_errors', 'ignore_python_warnings',
'batch', 'disable_multiprocessing', 'multiprocessing_pool',
'is_matplotlib_installed', 'is_networkx_installed',
'is_openimageio_installed', 'is_pandas_installed', 'is_tqdm_installed',
'required', 'is_iterable', 'is_string', 'is_numeric', 'is_integer',
'is_sibling', 'filter_kwargs', 'filter_mapping', 'first_item',
'get_domain_range_scale', 'set_domain_range_scale', 'domain_range_scale',
'to_domain_1', 'to_domain_10', 'to_domain_100', 'to_domain_degrees',
'to_domain_int', 'from_range_1', 'from_range_10', 'from_range_100',
'from_range_degrees', 'from_range_int', 'copy_definition',
'validate_method'
]
__all__ += [
'ColourWarning', 'ColourUsageWarning', 'ColourRuntimeWarning',
'message_box', 'show_warning', 'warning', 'runtime_warning',
'usage_warning', 'filter_warnings', 'suppress_warnings',
'numpy_print_options', 'ANCILLARY_COLOUR_SCIENCE_PACKAGES',
'ANCILLARY_RUNTIME_PACKAGES', 'ANCILLARY_DEVELOPMENT_PACKAGES',
'ANCILLARY_EXTRAS_PACKAGES', 'describe_environment'
]
__all__ += [
'as_array', 'as_int_array', 'as_float_array', 'as_numeric', 'as_int',
'as_float', 'set_float_precision', 'set_int_precision', 'as_namedtuple',
'closest_indexes', 'closest', 'normalise_maximum', 'interval',
'is_uniform', 'in_array', 'tstack', 'tsplit', 'row_as_diagonal',
'vector_dot', 'matrix_dot', 'orient', 'centroid', 'linear_conversion',
'fill_nan', 'linstep_function', 'ndarray_write', 'zeros', 'ones', 'full',
'index_along_last_axis'
]
__all__ += ['metric_mse', 'metric_psnr']
# ----------------------------------------------------------------------------#
# --- API Changes and Deprecation Management ---#
# ----------------------------------------------------------------------------#
class utilities(ModuleAPI):
def __getattr__(self, attribute):
return super(utilities, self).__getattr__(attribute)
# v0.4.0
API_CHANGES = {
'ObjectFutureAccessChange': [
[
'colour.utilities.linstep_function',
'colour.algebra.linstep_function',
],
[
'colour.utilities.linear_conversion',
'colour.algebra.linear_conversion',
],
[
'colour.utilities.matrix_dot',
'colour.algebra.matrix_dot',
],
[
'colour.utilities.normalise_maximum',
'colour.algebra.normalise_maximum',
],
[
'colour.utilities.vector_dot',
'colour.algebra.vector_dot',
],
]
}
"""
Defines the *colour.utilities* sub-package API changes.
API_CHANGES : dict
"""
if not is_documentation_building():
sys.modules['colour.utilities'] = utilities(
sys.modules['colour.utilities'], build_API_changes(API_CHANGES))
del ModuleAPI, is_documentation_building, build_API_changes, sys
|
bsd-3-clause
|
alongwithyou/auto-sklearn
|
autosklearn/estimators.py
|
5
|
4834
|
import os
import random
import shutil
import numpy as np
import autosklearn.automl
from autosklearn.constants import *
class AutoSklearnClassifier(autosklearn.automl.AutoML):
"""This class implements the classification task. It must not be pickled!
Parameters
----------
time_left_for_this_task : int, optional (default=3600)
Time limit in seconds for the search for appropriate classification
models. By increasing this value, *auto-sklearn* will find better
configurations.
per_run_time_limit : int, optional (default=360)
Time limit for a single call to the machine learning model.
initial_configurations_via_metalearning : int, optional (default=25)
ensemble_size : int, optional (default=50)
ensemble_nbest : int, optional (default=50)
seed : int, optional (default=1)
ml_memory_limit : int, optional (default=3000)
Memory limit for the machine learning algorithm. If the machine
learning algorithm tries to allocate more memory than this limit,
its evaluation will be stopped.
"""
def __init__(self, time_left_for_this_task=3600,
per_run_time_limit=360,
initial_configurations_via_metalearning=25,
ensemble_size=50, ensemble_nbest=50, seed=1,
ml_memory_limit=3000):
random_number = random.randint(0, 10000)
pid = os.getpid()
output_dir = "/tmp/autosklearn_output_%d_%d" % (pid, random_number)
tmp_dir = "/tmp/autosklearn_tmp_%d_%d" % (pid, random_number)
os.makedirs(output_dir)
os.makedirs(tmp_dir)
super(AutoSklearnClassifier, self).__init__(
tmp_dir, output_dir, time_left_for_this_task, per_run_time_limit,
log_dir=tmp_dir,
initial_configurations_via_metalearning=initial_configurations_via_metalearning,
ensemble_size=ensemble_size, ensemble_nbest=ensemble_nbest,
seed=seed, ml_memory_limit=ml_memory_limit)
def __del__(self):
self._delete_output_directories()
def _create_output_directories(self):
os.makedirs(self.output_dir)
os.makedirs(self.tmp_dir)
def _delete_output_directories(self):
shutil.rmtree(self.tmp_dir)
shutil.rmtree(self.output_dir)
def fit(self, X, y, metric='acc_metric', feat_type=None):
"""Fit *autosklearn* to given training set (X, y).
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target classes.
metric : str, optional (default='acc_metric')
The metric to optimize for. Can be one of: ['acc_metric',
'auc_metric', 'bac_metric', 'f1_metric', 'pac_metric']
feat_type : list, optional (default=None)
List of length :python:`X.shape[1]` describing whether an attribute is
continuous or categorical. Categorical attributes will automatically
be one-hot encoded.
"""
# Fit is supposed to be idempotent!
self._delete_output_directories()
self._create_output_directories()
y = np.atleast_1d(y)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity, as opposed to
# [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
self.n_classes_ = np.array(self.n_classes_, dtype=np.int)
if self.n_outputs_ > 1:
task = MULTILABEL_CLASSIFICATION
else:
if len(self.classes_[0]) == 2:
task = BINARY_CLASSIFICATION
else:
task = MULTICLASS_CLASSIFICATION
# TODO: fix metafeatures calculation to allow this!
if y.shape[1] == 1:
y = y.flatten()
return super(AutoSklearnClassifier, self).fit(X, y, task, metric,
feat_type)
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
return super(AutoSklearnClassifier, self).predict(X)
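# Illustrative note (not part of the original class): the class bookkeeping
# in `fit` relies on ``np.unique(..., return_inverse=True)``, which returns
# both the sorted class labels and the targets re-encoded as indices into
# them, e.g. ``np.unique([3, 1, 3], return_inverse=True)`` gives
# ``(array([1, 3]), array([1, 0, 1]))``.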
class AutoSklearnRegressor(autosklearn.automl.AutoML):
def __init__(self, **kwargs):
raise NotImplementedError()
|
bsd-3-clause
|
lucashtnguyen/wqio
|
wqio/algo/ros.py
|
1
|
16320
|
from __future__ import division
import pdb
import os
import sys
if sys.version_info.major == 3:
from io import StringIO
else:
from StringIO import StringIO
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas
__all__ = ['rosSort', 'MR']
def rosSort(dataframe, rescol='res', qualcol='qual', ndsymbol='ND'):
'''
This function prepares a dataframe for ROS. It sorts ascending with
non-detects on top, something like this:
[2, 4, 4, 10, 3, 5, 6, 10, 12, 40, 78, 120]
where [2, 4, 4, 10] are the ND results (masked in the output).
Input:
dataframe : a pandas dataframe with results and qualifiers.
The qualifiers of the dataframe must have two states:
detect and non-detect.
rescol (default = 'res') : name of the column in the dataframe
that contains result values.
qualcol (default = 'qual') : name of the column in the dataframe
that containes qualifiers. There must be a single, unique
qualifer that indicates that a result is non-detect.
ndsymbol (default = 'ND' : the value in `qualcol` that indicates
that a value in nondetect. *Important*: any other value will
be treated as a detection.
Output:
Sorted dataframe with a dropped index.
'''
# separate detects from non-detects
nondetects = dataframe[dataframe[qualcol] == ndsymbol].sort(columns=rescol)
detects = dataframe[dataframe[qualcol] != ndsymbol].sort(columns=rescol)
# remerge the separated values
ros_data = nondetects.append(detects)
return ros_data #.reset_index(drop=True)
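# Hedged illustration of rosSort (assumed inputs; not part of the original
# module): with a small dataframe such as
#
#     import pandas
#     df = pandas.DataFrame({'res':  [5.0, 2.0, 4.0, 10.0, 3.0],
#                            'qual': ['=', 'ND', 'ND', '=', '=']})
#     rosSort(df)
#
# the two 'ND' rows (2.0 and 4.0) come back first, sorted ascending, followed
# by the detected results sorted ascending (3.0, 5.0, 10.0).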
class MR(object):
    '''Regression on Order Statistics
    This class implements the MR method outlined in Hirsch and Stedinger
    (1987) to estimate the censored (non-detect) values of a dataset. An
    example dataset is available via the `utils.ros.getTestData` function.
    Parameters
    ----------
    data : pandas DataFrame
        The censored dataset for which the non-detect values need to be
        estimated.
    rescol : optional string (default='res')
        The name of the column containing the numerical values of the
        dataset. Non-detect values should be set to the detection limit.
    qualcol : optional string (default='qual')
        The name of the column containing the qualifiers marking the
        results as censored.
    ndsymbol : optional string (default='ND')
        The value of the `qualcol` column of `data` that marks a result
        as being censored. In processing, all qualifiers that are equal
        to `ndsymbol` will be set to 'ND'. All other values will be set
        to '='.
    Attributes
    ----------
    N_tot : int
        Total number of results in the dataset.
    N_nd : int
        Total number of non-detect results in the dataset.
    DLs : pandas DataFrame
        A DataFrame of the unique detection limits found in `data` along
        with the `A`, `B`, `C`, and `PE` quantities computed by the
        estimation.
    data : pandas DataFrame
        An expanded version of the original dataset `data` passed to the
        constructor. New columns include the plotting positions,
        Z-score, and estimated data. Additionally, the `qualcol` and
        `rescol` columns will have been renamed to `qual` and `res`,
        respectively. Also, the qualifier values will have been
        standardized per the `ndsymbol` section above.
    debug : pandas DataFrame
        A full version of the `data` DataFrame that includes other
        quantities computed during the estimation such as the "normal"
        and "averaged" ranks and the preliminary Z-score.
Examples
--------
    >>> from pybmp.utils import ros
    >>> myData = ros.MR(dataframe, rescol='Result', qualcol='Qualifiers',
    ...                 ndsymbol='ND')
'''
def __init__(self, data, rescol='res', qualcol='qual', ndsymbol='ND',
fitlogs=True, dist='norm'):
def _ros_DL_index(row):
'''
Helper function to create an array of indices for the detection
limits (self.DLs) corresponding to each data point
'''
DLIndex = np.zeros(len(self.data.res))
if self.DLs.shape[0] > 0:
index, = np.where(self.DLs['DL'] <= row['res'])
DLIndex = index[-1]
else:
DLIndex = 0
return DLIndex
if not isinstance(data, pandas.DataFrame):
raise ValueError("Input `data` must be a pandas.DataFrame")
if not data.index.is_unique:
raise ValueError("Index of input DataFrame `data` must be unique")
if data[rescol].min() <= 0:
raise ValueError('All result values of `data` must be positive')
# rename the dataframe columns to the standard names
# these will be used throughout ros.py when convenient
newdata = data.rename(columns={rescol: 'res', qualcol: 'qual'})
# confirm a datatype real quick
try:
newdata.res = np.float64(newdata.res)
except ValueError:
raise ValueError('Result data is not uniformly numeric')
# and get the basic info
self.N_tot = newdata.shape[0]
self.N_nd = newdata[newdata.qual == ndsymbol].shape[0]
# clear out all of the non-ND quals
newdata['qual'] = newdata['qual'].apply(lambda x: 'ND' if x == ndsymbol else '=')
#newdata.qual[newdata.qual != ndsymbol] = '='
#newdata.qual[newdata.qual == ndsymbol] = 'ND'
# sort the data
self.data = rosSort(newdata, rescol='res', qualcol='qual',
ndsymbol=ndsymbol)
self.fitlogs = fitlogs
if isinstance(dist, str):
self.dist = getattr(stats, dist)
else:
self.dist = dist
# create a dataframe of detection limits and their parameters
# used in the ROS estimation
self.DLs = self.cohn()
# create a DLIndex column that references self.DLs
self.data['DLIndex'] = self.data.apply(_ros_DL_index, axis=1)
# compute the ranks of the data
self._ros_ranks()
        # compute the plotting positions, z-scores, and final values
self.data = self.estimator()
# create the debug attribute as a copy of the self.data attribute
self.debug = self.data.copy(deep=True)
# select out only the necessary columns for data
self.data = self.data[['final_data', 'res', 'qual']]
def cohn(self):
        '''
        Creates a DataFrame of the unique detection limits in the dataset
        along with the `A`, `B`, `C`, and `PE` quantities used by the ROS
        estimation.
        '''
def _A(row):
'''
Helper function to compute the `A` quantity.
'''
# index of results above the lower DL
above = self.data.res >= row['lower']
# index of results below the upper DL
below = self.data.res < row['upper']
            # index of detected (non-censored) results
            detect = self.data.qual != 'ND'
            # return the number of results where all conditions are True
return self.data[above & below & detect].shape[0]
def _B(row):
'''
Helper function to compute the `B` quantity
'''
# index of data less than the lower DL
less_than = self.data.res < row['lower']
# index of data less than or equal to the lower DL
less_thanequal = self.data.res <= row['lower']
# index of detects, non-detects
detect = self.data.qual != 'ND'
nondet = self.data.qual == 'ND'
# number results less than or equal to lower DL and non-detect
LTE_nondets = self.data[less_thanequal & nondet].shape[0]
# number of results less than lower DL and detected
LT_detects = self.data[less_than & detect].shape[0]
# return the sum
return LTE_nondets + LT_detects
def _C(row):
'''
Helper function to compute the `C` quantity
'''
censored_below = self.data.res[self.data.qual == 'ND'] == row['lower']
return censored_below.sum()
# unique values
DLs = pandas.unique(self.data.res[self.data.qual == 'ND'])
        # if there is a result smaller than the minimum detection limit,
# add that value to the array
if DLs.shape[0] > 0:
if self.data.res.min() < DLs.min():
DLs = np.hstack([self.data.res.min(), DLs])
# create a dataframe
DLs = pandas.DataFrame(DLs, columns=['DL'])
# copy the DLs in two columns. offset the 2nd (upper) column
DLs['lower'] = DLs['DL']
if DLs.shape[0] > 1:
DLs['upper'] = DLs['DL'].shift(-1)
# fill in the missing values with infinity
DLs.fillna(value=np.inf, inplace=True)
else:
DLs['upper'] = np.inf
# compute A, B, and C
DLs['A'] = DLs.apply(_A, axis=1)
DLs['B'] = DLs.apply(_B, axis=1)
DLs['C'] = DLs.apply(_C, axis=1)
# add an extra row
DLs = DLs.reindex(range(DLs.shape[0]+1))
# add the 'PE' column, initialize with zeros
DLs['PE'] = 0.0
else:
dl_cols = ['DL', 'lower', 'upper', 'A', 'B', 'C', 'PE']
DLs = pandas.DataFrame(np.empty((0,7)), columns=dl_cols)
return DLs
def _ros_ranks(self):
        '''
        Determine the ranks of the data according to the following logic:
        rank[n] = 1 when:
            n is 0 OR
            n > 0 and DLIndex[n] != DLIndex[n-1] OR
            n > 0 and qual[n] != qual[n-1]
        rank[n] = rank[n-1] + 1 when:
            n > 0 and DLIndex[n] == DLIndex[n-1] and qual[n] == qual[n-1]
        where DLIndex[n] is the index of the highest detection limit that is
        less than or equal to the current data value.
        Then the ranks of equivalent, non-censored data values are averaged.
        '''
# get the length of the dataset and initialize the normal (raw) ranks
self.data['Norm Ranks'] = float(self.N_tot)
#norm_ranks = np.ones(self.N_tot, dtype='f2')
# loop through each value and compare to the previous value
# see docstring for more info on the logic behind all this
for n, index in enumerate(self.data.index):
if n == 0 \
or self.data['DLIndex'].iloc[n] != self.data['DLIndex'].iloc[n-1] \
or self.data.qual.iloc[n] != self.data.qual.iloc[n-1]:
self.data.loc[index, 'Norm Ranks'] = 1
else:
self.data.loc[index, 'Norm Ranks'] = self.data['Norm Ranks'].iloc[n-1] + 1
        # go through each index and see if the value is a detection;
        # if so, average the ranks of all equivalent values
def avgrank(r):
if r['qual'] != 'ND':
index = (self.data.DLIndex == r['DLIndex']) & \
(self.data.res == r['res']) & \
(self.data.qual != 'ND')
return self.data['Norm Ranks'][index].mean()
else:
return r['Norm Ranks']
self.data['Avg Ranks'] = self.data.apply(avgrank, axis=1)
def estimator(self):
'''
Estimates the values of the censored data
'''
def _ros_plotting_pos(row):
'''
Helper function to compute the ROS'd plotting position
'''
dl_1 = self.DLs.iloc[row['DLIndex']]
dl_2 = self.DLs.iloc[row['DLIndex']+1]
if row['qual'] == 'ND':
return (1 - dl_1['PE']) * row['Norm Ranks']/(dl_1['C']+1)
else:
return (1 - dl_1['PE']) + (dl_1['PE'] - dl_2['PE']) * \
row['Norm Ranks'] / (dl_1['A']+1)
def _select_final_data(row):
'''
            Helper function to select "final" data from original detects
and estimated non-detects
'''
if row['qual'] == 'ND':
return row['modeled_data']
else:
return row['res']
def _select_half_DLs(row):
'''
Helper function to select half DLs when there are too few detects
'''
if row['qual'] == 'ND':
return 0.5 * row['res']
else:
return row['res']
# detect/non-detect selectors
detect_selector = self.data.qual != 'ND'
nondet_selector = self.data.qual == 'ND'
# if there are no non-detects, just spit everything back out
if self.N_nd == 0:
self.data['final_data'] = self.data['res']
# if there are too few detects, use half DL
elif self.N_tot - self.N_nd < 2 or self.N_nd/self.N_tot > 0.8:
self.data['final_data'] = self.data.apply(_select_half_DLs, axis=1)
# in most cases, actually use the MR method to estimate NDs
else:
# compute the PE values
for j in self.DLs.index[:-1][::-1]:
self.DLs.loc[j, 'PE'] = self.DLs.loc[j+1, 'PE'] + \
self.DLs.loc[j, 'A'] / \
(self.DLs.loc[j, 'A'] + self.DLs.loc[j, 'B']) * \
(1 - self.DLs.loc[j+1, 'PE'])
# compute the plotting position of the data (uses the PE stuff)
self.data['plot_pos'] = self.data.apply(_ros_plotting_pos, axis=1)
# correctly sort the plotting positions of the ND data:
# ND_plotpos = self.data['plot_pos'][self.data['qual'] == 'ND']
# ND_plotpos.values.sort()
# NDs = (self.data.qual == 'ND').index
# self.data['plot_pos'].replace(ND_plotpos, inplace=True)
# estimate a preliminary value of the Z-scores
self.data['Zprelim'] = self.dist.ppf(self.data['plot_pos'])
# fit a line to the logs of the detected data
if self.fitlogs:
detect_vals = np.log(self.data['res'][detect_selector])
else:
detect_vals = self.data['res'][detect_selector]
fit = stats.linregress(self.data['Zprelim'][detect_selector],
detect_vals)
# save the fit params to an attribute
self.fit = fit
# pull out the slope and intercept for use later
slope, intercept = fit[:2]
# model the data based on the best-fit curve
self.data['modeled_data'] = np.exp(
slope*self.data['Zprelim'][nondet_selector] + intercept
)
# select out the final data
self.data['final_data'] = self.data.apply(
_select_final_data,
axis=1
)
return self.data
def plot(self, filename):
'''
makes a simple plot showing the original and modeled data
'''
fig, ax1 = plt.subplots()
ax1.plot(self.data.Z[self.data.qual != 'ND'],
self.data.res[self.data.qual != 'ND'],
'ko', mfc='Maroon', ms=6, label='original detects', zorder=8)
ax1.plot(self.data.Z[self.data.qual == 'ND'],
self.data.res[self.data.qual == 'ND'],
'ko', ms=6, label='original non-detects', zorder=8, mfc='none')
ax1.plot(self.data.Z, self.data.final_data, 'ks', ms=4, zorder=10,
label='modeled data', mfc='DodgerBlue')
ax1.set_xlabel(r'$Z$-score')
ax1.set_ylabel('concentration')
ax1.set_yscale('log')
ax1.legend(loc='upper left', numpoints=1)
ax1.xaxis.grid(True, which='major', ls='-', lw=0.5, alpha=0.35)
ax1.yaxis.grid(True, which='major', ls='-', lw=0.5, alpha=0.35)
ax1.yaxis.grid(True, which='minor', ls='-', lw=0.5, alpha=0.17)
plt.tight_layout()
fig.savefig(filename)
return fig
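# Hedged usage sketch of the MR class (assumed column names and values; not
# part of the original module):
#
#     import pandas
#     df = pandas.DataFrame({'res':  [2.0, 2.0, 5.0, 5.0, 6.0, 10.0, 15.0],
#                            'qual': ['ND', 'ND', 'ND', '=', '=', '=', '=']})
#     mr = MR(df, rescol='res', qualcol='qual', ndsymbol='ND')
#     mr.data['final_data']   # detects unchanged, NDs replaced by estimates
#     mr.DLs                  # unique detection limits with A, B, C, PE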
|
bsd-3-clause
|
valexandersaulys/prudential_insurance_kaggle
|
venv/lib/python2.7/site-packages/sklearn/linear_model/stochastic_gradient.py
|
31
|
50760
|
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
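# Hedged illustration of the one-vs-all label encoding performed above
# (illustrative values only): for est.classes_ == array([0, 1, 2]) and i == 1,
# every sample of class 1 keeps the target +1.0 and every other sample is
# assigned -1.0, e.g.
#
#     y   = np.array([0, 1, 2, 1])
#     y_i = np.array([-1.0, 1.0, -1.0, 1.0])
#
# fit_binary() below then solves one binary SGD problem per class using these
# +1/-1 targets.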
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (alpha * (t + t0)) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
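# Hedged usage sketch of out-of-core training with partial_fit (the data
# iterator and batch contents are illustrative assumptions; not part of
# scikit-learn):
#
#     import numpy as np
#     clf = SGDClassifier(loss='log')
#     all_classes = np.array([0, 1])
#     for X_batch, y_batch in batches:      # user-supplied mini-batch iterator
#         clf.partial_fit(X_batch, y_batch, classes=all_classes)
#     proba = clf.predict_proba(X_batch)    # available because loss='log'
#
# ``classes`` must be given on the first call so the coefficient matrix can be
# allocated before every label has been seen.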
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
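# Hedged numerical illustration of the learning rate schedules documented in
# the docstrings above (the numbers are illustrative): with the SGDRegressor
# defaults eta0=0.01, power_t=0.25 and alpha=0.0001,
#
#     constant:   eta = eta0                    -> 0.01 at every step
#     invscaling: eta = eta0 / pow(t, power_t)  -> 0.01 / 10**0.25 ~= 0.0056 at t=10
#     optimal:    eta = 1.0 / (alpha * (t + t0)), with t0 chosen by the Bottou
#                 heuristic, so eta decays roughly like 1/t.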
|
gpl-2.0
|
JosmanPS/scikit-learn
|
sklearn/utils/tests/test_sparsefuncs.py
|
157
|
13799
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
|
bsd-3-clause
|
twalthr/flink
|
flink-python/pyflink/table/tests/test_pandas_conversion.py
|
5
|
10777
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
from pandas.util.testing import assert_frame_equal
from pyflink.common import Row
from pyflink.table.types import DataTypes
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBatchTableTestCase, \
PyFlinkStreamTableTestCase
class PandasConversionTestBase(object):
@classmethod
def setUpClass(cls):
super(PandasConversionTestBase, cls).setUpClass()
cls.data = [(1, 1, 1, 1, True, 1.1, 1.2, 'hello', bytearray(b"aaa"),
decimal.Decimal('1000000000000000000.01'), datetime.date(2014, 9, 13),
datetime.time(hour=1, minute=0, second=1),
datetime.datetime(1970, 1, 1, 0, 0, 0, 123000), ['hello', '中文'],
Row(a=1, b='hello', c=datetime.datetime(1970, 1, 1, 0, 0, 0, 123000),
d=[1, 2])),
(1, 2, 2, 2, False, 2.1, 2.2, 'world', bytearray(b"bbb"),
decimal.Decimal('1000000000000000000.02'), datetime.date(2014, 9, 13),
datetime.time(hour=1, minute=0, second=1),
datetime.datetime(1970, 1, 1, 0, 0, 0, 123000), ['hello', '中文'],
Row(a=1, b='hello', c=datetime.datetime(1970, 1, 1, 0, 0, 0, 123000),
d=[1, 2]))]
cls.data_type = DataTypes.ROW(
[DataTypes.FIELD("f1", DataTypes.TINYINT()),
DataTypes.FIELD("f2", DataTypes.SMALLINT()),
DataTypes.FIELD("f3", DataTypes.INT()),
DataTypes.FIELD("f4", DataTypes.BIGINT()),
DataTypes.FIELD("f5", DataTypes.BOOLEAN()),
DataTypes.FIELD("f6", DataTypes.FLOAT()),
DataTypes.FIELD("f7", DataTypes.DOUBLE()),
DataTypes.FIELD("f8", DataTypes.STRING()),
DataTypes.FIELD("f9", DataTypes.BYTES()),
DataTypes.FIELD("f10", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("f11", DataTypes.DATE()),
DataTypes.FIELD("f12", DataTypes.TIME()),
DataTypes.FIELD("f13", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("f14", DataTypes.ARRAY(DataTypes.STRING())),
DataTypes.FIELD("f15", DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.STRING()),
DataTypes.FIELD("c", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("d", DataTypes.ARRAY(DataTypes.INT()))]))], False)
cls.pdf = cls.create_pandas_data_frame()
@classmethod
def create_pandas_data_frame(cls):
data_dict = {}
for j, name in enumerate(cls.data_type.names):
data_dict[name] = [cls.data[i][j] for i in range(len(cls.data))]
# need to convert to NumPy dtypes explicitly
import numpy as np
data_dict["f1"] = np.int8(data_dict["f1"])
data_dict["f2"] = np.int16(data_dict["f2"])
data_dict["f3"] = np.int32(data_dict["f3"])
data_dict["f4"] = np.int64(data_dict["f4"])
data_dict["f6"] = np.float32(data_dict["f6"])
data_dict["f7"] = np.float64(data_dict["f7"])
data_dict["f15"] = [row.as_dict() for row in data_dict["f15"]]
import pandas as pd
return pd.DataFrame(data=data_dict,
index=[2., 3.],
columns=['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',
'f10', 'f11', 'f12', 'f13', 'f14', 'f15'])
class PandasConversionTests(PandasConversionTestBase):
def test_from_pandas_with_incorrect_schema(self):
fields = self.data_type.fields.copy()
fields[0], fields[7] = fields[7], fields[0] # swap str with tinyint
wrong_schema = DataTypes.ROW(fields) # should be DataTypes.STRING()
with self.assertRaisesRegex(Exception, "Expected a string.*got int8"):
self.t_env.from_pandas(self.pdf, schema=wrong_schema)
def test_from_pandas_with_names(self):
# skip decimal as currently only decimal(38, 18) is supported
pdf = self.pdf.drop(['f10', 'f11', 'f12', 'f13', 'f14', 'f15'], axis=1)
new_names = list(map(str, range(len(pdf.columns))))
table = self.t_env.from_pandas(pdf, schema=new_names)
self.assertEqual(new_names, table.get_schema().get_field_names())
table = self.t_env.from_pandas(pdf, schema=tuple(new_names))
self.assertEqual(new_names, table.get_schema().get_field_names())
def test_from_pandas_with_types(self):
new_types = self.data_type.field_types()
new_types[0] = DataTypes.BIGINT()
table = self.t_env.from_pandas(self.pdf, schema=new_types)
self.assertEqual(new_types, table.get_schema().get_field_data_types())
table = self.t_env.from_pandas(self.pdf, schema=tuple(new_types))
self.assertEqual(new_types, table.get_schema().get_field_data_types())
class PandasConversionITTests(PandasConversionTestBase):
def test_from_pandas(self):
table = self.t_env.from_pandas(self.pdf, self.data_type, 5)
self.assertEqual(self.data_type, table.get_schema().to_row_data_type())
table = table.filter(table.f2 < 2)
table_sink = source_sink_utils.TestAppendSink(
self.data_type.field_names(),
self.data_type.field_types())
self.t_env.register_table_sink("Results", table_sink)
table.execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 1, 1, 1, true, 1.1, 1.2, hello, [97, 97, 97], "
"1000000000000000000.010000000000000000, 2014-09-13, 01:00:01, "
"1970-01-01 00:00:00.123, [hello, 中文], +I[1, hello, "
"1970-01-01 00:00:00.123, [1, 2]]]"])
def test_to_pandas(self):
table = self.t_env.from_pandas(self.pdf, self.data_type)
result_pdf = table.to_pandas()
result_pdf.index = self.pdf.index
self.assertEqual(2, len(result_pdf))
expected_arrow = self.pdf.to_records(index=False)
result_arrow = result_pdf.to_records(index=False)
for r in range(len(expected_arrow)):
for e in range(len(expected_arrow[r])):
self.assert_equal_field(expected_arrow[r][e], result_arrow[r][e])
def test_empty_to_pandas(self):
table = self.t_env.from_pandas(self.pdf, self.data_type)
pdf = table.filter(table.f1 < 0).to_pandas()
self.assertTrue(pdf.empty)
def test_to_pandas_for_retract_table(self):
table = self.t_env.from_pandas(self.pdf, self.data_type)
result_pdf = table.group_by(table.f1).select(table.f2.max.alias('f2')).to_pandas()
import pandas as pd
import numpy as np
assert_frame_equal(result_pdf, pd.DataFrame(data={'f2': np.int16([2])}))
result_pdf = table.group_by("f2").select("max(f1) as f2").to_pandas()
assert_frame_equal(result_pdf, pd.DataFrame(data={'f2': np.int8([1, 1])}))
def assert_equal_field(self, expected_field, result_field):
import numpy as np
result_type = type(result_field)
if result_type == dict:
self.assertEqual(expected_field.keys(), result_field.keys())
for key in expected_field:
self.assert_equal_field(expected_field[key], result_field[key])
elif result_type == np.ndarray:
self.assertTrue((expected_field == result_field).all())
else:
self.assertTrue(expected_field == result_field)
class BatchPandasConversionTests(PandasConversionTests,
PandasConversionITTests,
PyFlinkBatchTableTestCase):
pass
class StreamPandasConversionTests(PandasConversionITTests,
PyFlinkStreamTableTestCase):
def test_to_pandas_with_event_time(self):
self.t_env.get_config().get_configuration().set_string("parallelism.default", "1")
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'2018-03-11 03:10:00',
'2018-03-11 03:10:00',
'2018-03-11 03:10:00',
'2018-03-11 03:40:00',
'2018-03-11 04:20:00',
'2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_to_pandas_with_event_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
self.t_env.get_config().get_configuration().set_string(
"pipeline.time-characteristic", "EventTime")
source_table = """
create table source_table(
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
result_pdf = t.to_pandas()
import pandas as pd
os.remove(source_path)
assert_frame_equal(result_pdf, pd.DataFrame(
data={"rowtime": [
datetime.datetime(2018, 3, 11, 3, 10),
datetime.datetime(2018, 3, 11, 3, 10),
datetime.datetime(2018, 3, 11, 3, 10),
datetime.datetime(2018, 3, 11, 3, 40),
datetime.datetime(2018, 3, 11, 4, 20),
datetime.datetime(2018, 3, 11, 3, 30),
]}))
|
apache-2.0
|
BoltzmannBrain/Kaggle_Higgs
|
higgsml-run.py
|
1
|
1095
|
#-------------------------------------------------------------------------------
# Name: higgsml-run.py
# Purpose: Run the Higgs classifier
#
# Author: Alexander Lavin
#
# Created: 15/09/2014
# Copyright: (c) Alexander Lavin 2014
# alexanderlavin.com
#-------------------------------------------------------------------------------
def main(gbc):
import math
import pandas as pd
import numpy as np
# Run model on test data and export results csv
print('Loading and running testing data, writing to csv')
data = pd.read_csv("test.csv")
X_test = data.values[:, 1:]
ids = data.EventId
d = gbc.predict_proba(X_test)[:, 1]
r = np.argsort(d) + 1 # argsort(d) returns the indices that would sort the array d
p = np.empty(len(X_test), dtype=object)
# pcut is the signal/background probability threshold; like the classifier gbc, it
# is expected to be defined by the training step before main() is called.
p[d > pcut] = 's'
p[d <= pcut] = 'b'
df = pd.DataFrame({"EventId": ids, "RankOrder": r, "Class": p})
df.to_csv("predictions.csv", index=False, columns=["EventId", "RankOrder", "Class"])
return []
if __name__ == '__main__':
# model (the trained classifier passed to main) is expected to be defined by the
# companion training script before this entry point is run.
main(model)
|
mit
|
bthirion/scikit-learn
|
examples/feature_selection/plot_feature_selection_pipeline.py
|
58
|
1049
|
"""
==================
Pipeline Anova SVM
==================
Simple usage of a Pipeline that successively runs univariate
feature selection with ANOVA and then trains a C-SVM on the selected features.
"""
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
print(__doc__)
# import some data to play with
X, y = samples_generator.make_classification(
n_features=20, n_informative=3, n_redundant=0, n_classes=4,
n_clusters_per_class=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.SVC(kernel='linear')
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X_train, y_train)
y_pred = anova_svm.predict(X_test)
print(classification_report(y_test, y_pred))
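# Editor's note: the lines below are an illustrative follow-up sketch, not part of
# the original example. They inspect which of the 20 input features the fitted
# ANOVA filter kept, using the step name that make_pipeline derives from the class.
selected = anova_svm.named_steps['selectkbest'].get_support(indices=True)
print("Indices of the 3 features kept by the ANOVA filter:", selected)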
|
bsd-3-clause
|
rtavenar/tslearn
|
tslearn/docs/examples/clustering/plot_kshape.py
|
1
|
1335
|
# -*- coding: utf-8 -*-
"""
KShape
======
This example uses the KShape clustering method [1] that is based on
cross-correlation to cluster time series.
[1] J. Paparrizos & L. Gravano. k-Shape: Efficient and Accurate Clustering \
of Time Series. SIGMOD 2015. pp. 1855-1870.
"""
# Author: Romain Tavenard
# License: BSD 3 clause
import numpy
import matplotlib.pyplot as plt
from tslearn.clustering import KShape
from tslearn.datasets import CachedDatasets
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
seed = 0
numpy.random.seed(seed)
X_train, y_train, X_test, y_test = CachedDatasets().load_dataset("Trace")
# Keep first 3 classes and the first 50 time series
X_train = X_train[y_train < 4]
X_train = X_train[:50]
numpy.random.shuffle(X_train)
# For this method to operate properly, prior scaling is required
X_train = TimeSeriesScalerMeanVariance().fit_transform(X_train)
sz = X_train.shape[1]
# kShape clustering
ks = KShape(n_clusters=3, verbose=True, random_state=seed)
y_pred = ks.fit_predict(X_train)
plt.figure()
for yi in range(3):
plt.subplot(3, 1, 1 + yi)
for xx in X_train[y_pred == yi]:
plt.plot(xx.ravel(), "k-", alpha=.2)
plt.plot(ks.cluster_centers_[yi].ravel(), "r-")
plt.xlim(0, sz)
plt.ylim(-4, 4)
plt.title("Cluster %d" % (yi + 1))
plt.tight_layout()
plt.show()
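# Editor's note: illustrative follow-up sketch, not part of the original example.
# The fitted model can also assign held-out Trace series to the learned
# shape-based clusters; the series must be scaled the same way as the training set.
X_test_scaled = TimeSeriesScalerMeanVariance().fit_transform(X_test[:10])
print("Cluster labels for 10 held-out series:", ks.predict(X_test_scaled))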
|
bsd-2-clause
|
jrbourbeau/cr-composition
|
plotting/plot_response_matrix.py
|
1
|
2662
|
#!/usr/bin/env python
from __future__ import division, print_function
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
import comptools as comp
if __name__ == '__main__':
description = 'Plots response matrix used in unfolding'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-c', '--config', dest='config',
default='IC86.2012',
choices=comp.simfunctions.get_sim_configs(),
help='Detector configuration')
parser.add_argument('--num_groups', dest='num_groups', type=int,
default=4, choices=[2, 3, 4],
help='Number of composition groups')
args = parser.parse_args()
config = args.config
num_groups = args.num_groups
# Load response matrix from disk
response_file = os.path.join(comp.paths.comp_data_dir, config,
'unfolding',
'response_{}-groups.txt'.format(num_groups))
response = np.loadtxt(response_file)
response_err_file = os.path.join(comp.paths.comp_data_dir, config,
'unfolding',
'response_err_{}-groups.txt'.format(num_groups))
response_err = np.loadtxt(response_err_file)
# Plot response matrix
fig, ax = plt.subplots()
plt.imshow(response, origin='lower', cmap='viridis')
ax.plot([0, response.shape[0]-1], [0, response.shape[1]-1],
marker='None', ls=':', color='C1')
ax.set_xlabel('True bin')
ax.set_ylabel('Reconstructed bin')
ax.set_title('Response matrix')
plt.colorbar(label=r'$\mathrm{P(E_i|C_{\mu})}$')
response_plot_outfile = os.path.join(
comp.paths.figures_dir, 'unfolding', config, 'response_matrix',
'response-matrix_{}-groups.png'.format(num_groups))
comp.check_output_dir(response_plot_outfile)
plt.savefig(response_plot_outfile)
# Plot response matrix error
fig, ax = plt.subplots()
plt.imshow(response_err, origin='lower', cmap='viridis')
ax.plot([0, response_err.shape[0]-1], [0, response_err.shape[1]-1],
marker='None', ls=':', color='C1')
ax.set_xlabel('True bin')
ax.set_ylabel('Reconstructed bin')
ax.set_title('Response matrix error')
plt.colorbar(label=r'$\mathrm{\delta P(E_i|C_{\mu})}$')
response_plot_outfile = os.path.join(
comp.paths.figures_dir, 'unfolding', config, 'response_matrix',
'response_err-matrix_{}-groups.png'.format(num_groups))
comp.check_output_dir(response_plot_outfile)
plt.savefig(response_plot_outfile)
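# Editor's note: illustrative follow-up sketch, not part of the original script.
# If the response matrix is normalized per true-energy bin (an assumption about how
# the unfolding pipeline writes the file), each column should sum to roughly one.
print('Response matrix column sums:', response.sum(axis=0))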
|
mit
|
fabioticconi/scikit-learn
|
examples/ensemble/plot_adaboost_hastie_10_2.py
|
355
|
3576
|
"""
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and the real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
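# Editor's note: illustrative follow-up sketch, not part of the original example.
# Printing the final staged errors lets the discrete/real comparison be read off
# numerically as well as from the figure.
print("Discrete SAMME final test error: %.4f" % ada_discrete_err[-1])
print("Real SAMME.R final test error:   %.4f" % ada_real_err[-1])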
|
bsd-3-clause
|
bbfrederick/stabilitycalc
|
stabilityfuncs.py
|
1
|
38385
|
#!/opt/local/bin/python
#
# $Author: frederic $
# $Date: 2011/03/25 15:06:35 $
# $Id: stabilityfuncs.py,v 1.8 2011/03/25 15:06:35 frederic Exp $
#
import sys
import os
import time
import matplotlib
import numpy as np
import scipy as sp
import pylab as P
from nifti import *
from htmltagutils import *
from pylab import plot, legend, show, hold
########################################################################
#
#
# Subroutine definitions
#
#
########################################################################
def setlimits(coil):
#
#
# This is a kludge. There should be separate dictionaries for phantom characteristics and coil characteristics, and all values
# should be read in from configuration files.
#
#
limitdict={}
# sample-dependent quantities
limitdict['DopedWaterPhantom_rad']= ((82.0,84.5),(81.0,86.5),(0,0),"Doped water phantom radius","")
limitdict['DopedWaterPhantom_shape']= ((0.97,1.03),(0.95,1.05),(0,0),"Doped water phantom shape","")
limitdict['DopedWaterPhantom_snr']= ((0.97,1.03),(0.95,1.05),(0,0),"Doped water phantom SNR","") # do not yet have normative limits
limitdict['BIRNphantom_rad']= ((80.9209448163338,86.6421984038845),(79.967402551742,87.5957406684762),(0,0),"BIRN phantom radius","")
limitdict['BIRNphantom_shape']= ((0.941427953309648,1.05175367332324),(0.923040333307383,1.07014129332551),(0,0),"BIRN phantom shape","")
limitdict['BIRNphantom_snr']= ((0.97,1.03),(0.95,1.05),(0,0),"BIRN phantom SNR","") # do not yet have normative limits
limitdict['head_rad']= ((60.0,70.0),(50.0,80.0),(0,0),"Head radius","") # do not yet have normative limits
limitdict['head_shape']= ((1.4,1.5),(1.2,1.7),(0,0),"Head shape","") # do not yet have normative limits
limitdict['head_snr']= ((1.4,1.5),(1.2,1.7),(0,0),"Head SNR","") # do not yet have normative limits
# coil independent quantities
limitdict['center_of_mass_x']= ((30.0528824213927,34.071011383917),(29.383194260972,34.7406995443377),(1,0),"Center of mass x","")
limitdict['center_of_mass_y']= ((28.0140073333838,32.9638687728109),(27.1890304268127,33.788845679382),(1,0),"Center of mass y","")
limitdict['center_of_mass_z']= ((12.8178829000925,14.5672481911823),(12.5263220182442,14.8588090730306),(0,0),"Center of mass z","")
# coil dependent quantities
coilrecognized=0
if (coil=='32Ch_Head'):
coilrecognized=1
limitdict['peripheral_angle_p-p%']= ((0,10.0),(0,10.0),(0,0),"Peripheral angle intensity p-p %","")
limitdict['peripheral_angle_SFNR_p-p%']= ((0,10.0),(0,10.0),(0,0),"Peripheral angle SFNR p-p %","")
limitdict['central_roi_mean']= ((100.0,200.0),(50.0,300.0),(0,0),"Central ROI mean","")
limitdict['central_roi_raw_p-p%']= ((0.0,0.5),(0.0,0.6),(0,0),"Central ROI raw p-p %","")
limitdict['central_roi_raw_std%']= ((0.0,0.15),(0.0,0.25),(0,0),"Central ROI raw stddev %","")
limitdict['central_roi_detrended_mean']= ((600.0,900.0),(500.0,1200.0),(1,1),"Central ROI detrended mean","M")
limitdict['central_roi_detrended_p-p%']= ((0.0,0.5),(0.0,0.6),(1,1),"Central ROI detrended p-p %","P")
limitdict['central_roi_detrended_std%']= ((0.0,0.15),(0.0,0.25),(1,0),"Central ROI detrended stddev %","")
limitdict['central_roi_SNR']= ((100.0,100000.0),(75.0,100000.0),(1,1),"Central ROI SNR","S")
limitdict['central_roi_SFNR']= ((238.754898030312,365.602121158555),(217.613694175605,386.743325013262),(0,0),"Central ROI SFNR","F")
limitdict['central_roi_polyfit_lin']= ((-0.00306061155726304,0.00338754176512665),(-0.00413530377766132,0.00446223398552493),(0,0),"Central ROI polyfit linear term","")
limitdict['central_roi_polyfit_quad']= ((-5.21634333271468E-06,5.54843534508463E-06),(-7.0104731123479E-06,7.34256512471785E-06),(0,0),"Central ROI polyfit quadratic term","")
limitdict['central_roi_drift%']= ((0.0,0.536),(0.0,0.688),(1,0),"Central ROI drift %","D")
limitdict['peripheral_roi_mean']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI mean","")
limitdict['peripheral_roi_raw_p-p%']= ((0.0,0.5),(0.0,0.6),(0,0),"Peripheral ROI raw p-p %","")
limitdict['peripheral_roi_raw_std%']= ((0,0.125),(0,0.15),(0,0),"Peripheral ROI raw stddev %","")
limitdict['peripheral_roi_detrended_p-p%']= ((0.0,0.5),(0.0,0.6),(1,1),"Peripheral ROI detrended p-p %","p")
limitdict['peripheral_roi_detrended_std%']= ((0,0.125),(0,0.15),(0,0),"Peripheral ROI detrended stddev %","")
limitdict['peripheral_roi_SNR']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI SNR","")
limitdict['peripheral_roi_SFNR']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI SFNR","")
limitdict['peripheral_roi_polyfit_lin']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI polyfit linear term","")
limitdict['peripheral_roi_polyfit_quad']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI polyfit quadratic term","")
limitdict['peripheral_roi_drift%']= ((0.0,0.397),(0.0,0.507),(1,0),"Peripheral ROI drift %","d")
limitdict['odd_ghost_mean']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost mean","")
limitdict['odd_ghost_std']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost stddev","")
limitdict['odd_ghost_min']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost min","")
limitdict['odd_ghost_max']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost max","")
limitdict['odd_ghost_p-p']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost p-p","")
limitdict['odd_ghost_p-p%']= ((0.0,5.0),(0.0,10.0),(1,0),"Odd ghost p-p %","")
limitdict['even_ghost_mean']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost mean","")
limitdict['even_ghost_std']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost stddev","")
limitdict['even_ghost_min']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost min","")
limitdict['even_ghost_max']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost max","")
limitdict['even_ghost_p-p']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost p-p","")
limitdict['even_ghost_p-p%']= ((0.0,5.0),(0.0,10.0),(1,0),"Even ghost p-p %","")
limitdict['weissrdc']= ((0.0,5.0),(0.0,10.0),(1,0),"Weisskoff radius of decorrelation","W")
if (coil=='HeadMatrix'):
coilrecognized=1
limitdict['peripheral_angle_p-p%']= ((0,10.0),(0,10.0),(0,0),"Peripheral angle intensity p-p %","")
limitdict['peripheral_angle_SFNR_p-p%']= ((0,10.0),(0,10.0),(0,0),"Peripheral angle SFNR p-p %","")
limitdict['central_roi_mean']= ((100.0,200.0),(50.0,300.0),(0,0),"Central ROI mean","")
limitdict['central_roi_raw_p-p%']= ((0.0,0.5),(0.0,0.6),(0,0),"Central ROI raw p-p %","")
limitdict['central_roi_raw_std%']= ((0.0,0.15),(0.0,0.25),(0,0),"Central ROI raw stddev %","")
limitdict['central_roi_detrended_mean']= ((1100.0,1600.0),(900.0,1800.0),(1,1),"Central ROI detrended mean","M")
limitdict['central_roi_detrended_p-p%']= ((0.0,0.5),(0.0,0.6),(1,1),"Central ROI detrended p-p %","P")
limitdict['central_roi_detrended_std%']= ((0.0,0.15),(0.0,0.25),(1,0),"Central ROI detrended stddev %","")
limitdict['central_roi_SNR']= ((250.0,100000.0),(200.0,100000.0),(1,1),"Central ROI SNR","S")
limitdict['central_roi_SFNR']= ((238.754898030312,365.602121158555),(217.613694175605,386.743325013262),(0,0),"Central ROI SFNR","F")
limitdict['central_roi_polyfit_lin']= ((-0.00306061155726304,0.00338754176512665),(-0.00413530377766132,0.00446223398552493),(0,0),"Central ROI polyfit linear term","")
limitdict['central_roi_polyfit_quad']= ((-5.21634333271468E-06,5.54843534508463E-06),(-7.0104731123479E-06,7.34256512471785E-06),(0,0),"Central ROI polyfit quadratic term","")
limitdict['central_roi_drift%']= ((0.0,0.536),(0.0,0.688),(1,0),"Central ROI drift %","D")
limitdict['peripheral_roi_mean']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI mean","")
limitdict['peripheral_roi_raw_p-p%']= ((0.0,0.5),(0.0,0.6),(0,0),"Peripheral ROI raw p-p %","")
limitdict['peripheral_roi_raw_std%']= ((0,0.125),(0,0.15),(0,0),"Peripheral ROI raw stddev %","")
limitdict['peripheral_roi_detrended_p-p%']= ((0.0,0.5),(0.0,0.6),(1,1),"Peripheral ROI detrended p-p %","p")
limitdict['peripheral_roi_detrended_std%']= ((0,0.125),(0,0.15),(0,0),"Peripheral ROI detrended stddev %","")
limitdict['peripheral_roi_SNR']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI SNR","")
limitdict['peripheral_roi_SFNR']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI SFNR","")
limitdict['peripheral_roi_polyfit_lin']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI polyfit linear term","")
limitdict['peripheral_roi_polyfit_quad']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI polyfit quadratic term","")
limitdict['peripheral_roi_drift%']= ((0.0,0.397),(0.0,0.507),(1,0),"Peripheral ROI drift %","d")
limitdict['odd_ghost_mean']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost mean","")
limitdict['odd_ghost_std']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost stddev","")
limitdict['odd_ghost_min']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost min","")
limitdict['odd_ghost_max']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost max","")
limitdict['odd_ghost_p-p']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost p-p","")
limitdict['odd_ghost_p-p%']= ((0.0,5.0),(0.0,10.0),(1,0),"Odd ghost p-p %","O")
limitdict['even_ghost_mean']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost mean","")
limitdict['even_ghost_std']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost stddev","")
limitdict['even_ghost_min']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost min","")
limitdict['even_ghost_max']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost max","")
limitdict['even_ghost_p-p']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost p-p","")
limitdict['even_ghost_p-p%']= ((0.0,5.0),(0.0,10.0),(1,0),"Even ghost p-p %","E")
limitdict['weissrdc']= ((0.0,5.0),(0.0,10.0),(1,0),"Weisskoff radius of decorrelation","W")
if (coil=='TxRx_Head'):
coilrecognized=1
limitdict['peripheral_angle_p-p%']= ((0,10.0),(0,10.0),(0,0),"Peripheral angle intensity p-p %","")
limitdict['peripheral_angle_SFNR_p-p%']= ((0,10.0),(0,10.0),(0,0),"Peripheral angle SFNR p-p %","")
limitdict['central_roi_mean']= ((100.0,200.0),(50.0,300.0),(0,0),"Central ROI mean","")
limitdict['central_roi_raw_p-p%']= ((0.0,0.5),(0.0,0.6),(0,0),"Central ROI raw p-p %","")
limitdict['central_roi_raw_std%']= ((0.0,0.15),(0.0,0.25),(0,0),"Central ROI raw stddev %","")
limitdict['central_roi_detrended_mean']= ((1200.0,1750.0),(1000.0,2000.0),(1,1),"Central ROI detrended mean","M")
limitdict['central_roi_detrended_p-p%']= ((0.0,0.5),(0.0,0.6),(1,1),"Central ROI detrended p-p %","P")
limitdict['central_roi_detrended_std%']= ((0.0,0.15),(0.0,0.25),(1,0),"Central ROI detrended stddev %","")
limitdict['central_roi_SNR']= ((300.0,100000.0),(250.0,100000.0),(1,1),"Central ROI SNR","S")
limitdict['central_roi_SFNR']= ((238.754898030312,365.602121158555),(217.613694175605,386.743325013262),(0,0),"Central ROI SFNR","F")
limitdict['central_roi_polyfit_lin']= ((-0.00306061155726304,0.00338754176512665),(-0.00413530377766132,0.00446223398552493),(0,0),"Central ROI polyfit linear term","")
limitdict['central_roi_polyfit_quad']= ((-5.21634333271468E-06,5.54843534508463E-06),(-7.0104731123479E-06,7.34256512471785E-06),(0,0),"Central ROI polyfit quadratic term","")
limitdict['central_roi_drift%']= ((0.0,0.536),(0.0,0.688),(1,0),"Central ROI drift %","D")
limitdict['peripheral_roi_mean']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI mean","")
limitdict['peripheral_roi_raw_p-p%']= ((0.0,0.5),(0.0,0.6),(0,0),"Peripheral ROI raw p-p %","")
limitdict['peripheral_roi_raw_std%']= ((0,0.125),(0,0.15),(0,0),"Peripheral ROI raw stddev %","")
limitdict['peripheral_roi_detrended_p-p%']= ((0.0,0.5),(0.0,0.6),(1,1),"Peripheral ROI detrended p-p %","p")
limitdict['peripheral_roi_detrended_std%']= ((0,0.125),(0,0.15),(0,0),"Peripheral ROI detrended stddev %","")
limitdict['peripheral_roi_SNR']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI SNR","s")
limitdict['peripheral_roi_SFNR']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI SFNR","f")
limitdict['peripheral_roi_polyfit_lin']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI polyfit linear term","")
limitdict['peripheral_roi_polyfit_quad']= ((100.0,200.0),(50.0,300.0),(0,0),"Peripheral ROI polyfit quadratic term","")
limitdict['peripheral_roi_drift%']= ((0.0,0.397),(0.0,0.507),(1,0),"Peripheral ROI drift %","d")
limitdict['odd_ghost_mean']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost mean","")
limitdict['odd_ghost_std']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost stddev","")
limitdict['odd_ghost_min']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost min","")
limitdict['odd_ghost_max']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost max","")
limitdict['odd_ghost_p-p']= ((0.0,5.0),(0.0,10.0),(0,0),"Odd ghost p-p","")
limitdict['odd_ghost_p-p%']= ((0.0,5.0),(0.0,10.0),(1,0),"Odd ghost p-p %","O")
limitdict['even_ghost_mean']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost mean","")
limitdict['even_ghost_std']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost stddev","")
limitdict['even_ghost_min']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost min","")
limitdict['even_ghost_max']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost max","")
limitdict['even_ghost_p-p']= ((0.0,5.0),(0.0,10.0),(0,0),"Even ghost p-p","")
limitdict['even_ghost_p-p%']= ((0.0,5.0),(0.0,10.0),(1,0),"Even ghost p-p %","E")
limitdict['weissrdc']= ((0.0,5.0),(0.0,10.0),(1,0),"Weisskoff radius of decorrelation","W")
if (coilrecognized!=1):
print "setlimits: coil not recognized!"
exit(1)
return(limitdict)
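# Editor's note (illustrative sketch, not part of the original module): typical use
# of setlimits together with the helpers defined further down, e.g.
# limits = setlimits('HeadMatrix')
# print(formatlimits(limits['central_roi_SNR']))
# print(limitcheck(260.0, limits['central_roi_SNR']))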
def getphasedarrayelementdata():
coildata = \
[[ 'H1' , 32.2 , 29.9 , 13.7083425891 , 42.964948901 , 28.0100913861 , 22.9417612371 , 0.686342669504 , -0.143547282337 , 0.534641437457 ], \
[ 'H2' , 32.2 , 29.9 , 13.7083425891 , 37.4016170534 , 40.0389875195 , 25.2266763628 , 0.329400144891 , 0.551096968041 , 0.665197834262 ], \
[ 'H3' , 32.2 , 29.9 , 13.7083425891 , 43.6957159186 , 33.6969950084 , 26.15480836 , 0.664143251597 , 0.17892303095 , 0.705504191638 ], \
[ 'H4' , 32.2 , 29.9 , 13.7083425891 , 39.342511644 , 21.4716900377 , 26.4535954444 , 0.408581359381 , -0.518626182677 , 0.7350831935 ], \
[ 'H5' , 32.2 , 29.9 , 13.7083425891 , 28.5459466637 , 20.1905428172 , 26.7164612498 , -0.215855031737 , -0.574544271594 , 0.750606415082 ], \
[ 'H6' , 32.2 , 29.9 , 13.7083425891 , 23.2546980209 , 28.3256535192 , 25.9350485719 , -0.504637383003 , -0.0943597595371 , 0.702834443926 ], \
[ 'H7' , 32.2 , 29.9 , 13.7083425891 , 27.6210265294 , 35.977287542 , 25.2607177856 , -0.224100358067 , 0.316439931525 , 0.660225547255 ], \
[ 'H8' , 32.2 , 29.9 , 13.7083425891 , 38.4843581961 , 45.8973588485 , 20.57984148 , 0.363066912593 , 0.843334807426 , 0.38764496171 ], \
[ 'H9' , 32.2 , 29.9 , 13.7083425891 , 45.9251163156 , 40.984705969 , 19.1136264104 , 0.78716533336 , 0.542183643167 , 0.287741125976 ], \
[ 'H10' , 32.2 , 29.9 , 13.7083425891 , 48.9834153261 , 29.8642029302 , 20.3661341716 , 0.929188910642 , -0.0367319735672 , 0.359200955381 ], \
[ 'H11' , 32.2 , 29.9 , 13.7083425891 , 45.8034369362 , 19.9407813836 , 19.0872747851 , 0.757026293583 , -0.586929646958 , 0.27874667611 ], \
[ 'H12' , 32.2 , 29.9 , 13.7083425891 , 35.8661495329 , 13.0043690248 , 20.4367020402 , 0.194472924692 , -0.906785402977 , 0.362258329936 ], \
[ 'H13' , 32.2 , 29.9 , 13.7083425891 , 23.5995114328 , 15.1173719171 , 20.0598240878 , -0.508431587393 , -0.773156034202 , 0.349057659982 ], \
[ 'H14' , 32.2 , 29.9 , 13.7083425891 , 16.7668641226 , 21.6244127374 , 19.1025665617 , -0.8396244467 , -0.454997163282 , 0.287006658507 ], \
[ 'H15' , 32.2 , 29.9 , 13.7083425891 , 15.0575673462 , 31.6428114281 , 20.2198560277 , -0.92682121424 , 0.0949103241031 , 0.353099548259 ], \
[ 'H16' , 32.2 , 29.9 , 13.7083425891 , 20.152672608 , 42.5840922143 , 19.0117148923 , -0.64979307412 , 0.698135579607 , 0.291258872889 ], \
[ 'H17' , 32.2 , 29.9 , 13.7083425891 , 28.8123057976 , 47.5514803475 , 20.6258547932 , -0.15527645077 , 0.904234683939 , 0.392908991401 ], \
[ 'H18' , 32.2 , 29.9 , 13.7083425891 , 36.9193476743 , 46.6423682447 , 12.3135387543 , 0.301537943365 , 0.851712451111 , -0.0702748530681 ], \
[ 'H19' , 32.2 , 29.9 , 13.7083425891 , 50.5680421244 , 36.0475571125 , 12.8565693456 , 0.95718730761 , 0.274281135701 , -0.0339557259062 ], \
[ 'H20' , 32.2 , 29.9 , 13.7083425891 , 51.1123027103 , 24.8984550162 , 12.9983951787 , 0.953967068348 , -0.294732115662 , -0.0222483507792 ], \
[ 'H21' , 32.2 , 29.9 , 13.7083425891 , 42.9897279881 , 14.2187631383 , 12.7987799925 , 0.600246890071 , -0.796184902727 , -0.0329639248684 ], \
[ 'H22' , 32.2 , 29.9 , 13.7083425891 , 31.0853307034 , 13.3681516475 , 12.8478366965 , -0.0817298438145 , -0.989760146416 , -0.0273984930497 ], \
[ 'H23' , 32.2 , 29.9 , 13.7083425891 , 21.1624486687 , 14.3378945165 , 13.0668031136 , -0.631896347923 , -0.771970379824 , -0.0174681436609 ], \
[ 'H24' , 32.2 , 29.9 , 13.7083425891 , 12.4410656549 , 27.1218000635 , 12.8424860854 , -0.985547677503 , -0.154727937506 , -0.0335470290739 ], \
[ 'H25' , 32.2 , 29.9 , 13.7083425891 , 14.9558069749 , 38.3093946903 , 12.6730259116 , -0.886481739897 , 0.456950323494 , -0.0411898753107 ], \
[ 'H26' , 32.2 , 29.9 , 13.7083425891 , 24.388553153 , 47.4206495081 , 12.1140818206 , -0.405073084648 , 0.908230801413 , -0.0723320106235 ], \
[ 'H27' , 32.2 , 29.9 , 13.7083425891 , 48.909653207 , 31.3813336211 , 6.41880935329 , 0.919150132757 , 0.0393627617215 , -0.373128714066 ], \
[ 'H28' , 32.2 , 29.9 , 13.7083425891 , 45.9374501414 , 20.9978126842 , 6.85625931307 , 0.770545964052 , -0.52008435123 , -0.354460723764 ], \
[ 'H29' , 32.2 , 29.9 , 13.7083425891 , 39.9108607093 , 13.7648262199 , 9.75553405599 , 0.410816532572 , -0.868628737298 , -0.197864098767 ], \
[ 'H30' , 32.2 , 29.9 , 13.7083425891 , 25.4615393928 , 14.1787376984 , 8.51062106553 , -0.279532913232 , -0.873602338852 , -0.258031795216 ], \
[ 'H31' , 32.2 , 29.9 , 13.7083425891 , 16.891971643 , 22.1771607521 , 5.75521104587 , -0.815064663484 , -0.382622015372 , -0.408913118778 ], \
[ 'H32' , 32.2 , 29.9 , 13.7083425891 , 16.1416671821 , 32.9061734579 , 5.86933917773 , -0.893075950616 , 0.167898676664 , -0.401084216668 ], \
[ 'H1P' , 32.0 , 30.8 , 13.7040225613 , 39.3099248626 , 37.6563795847 , 16.3467547135 , 0.479931606088 , 0.384604308487 , 0.164586949593 ], \
[ 'H2P' , 32.0 , 30.8 , 13.7040225613 , 40.4912112885 , 39.5227617618 , 16.2696454637 , 0.545311566783 , 0.496448258246 , 0.167134718355 ], \
[ 'H3P' , 32.0 , 30.8 , 13.7040225613 , 43.8403017673 , 32.5545271047 , 16.1373848907 , 0.74505792762 , 0.0809980813256 , 0.190449585541 ], \
[ 'H4P' , 32.0 , 30.8 , 13.7040225613 , 40.9056354712 , 29.6925025343 , 15.7152747417 , 0.572279360204 , -0.075538193924 , 0.127866077223 ]]
return(coildata)
def freqanalysis(thetimecourse):
# NB: unfinished stub - computes the magnitude spectrum and a simple neighbour
# average as a noise estimate, but does not yet return or use them.
thefftsignal=abs(np.fft.fft(thetimecourse))
thelen=len(thefftsignal)
thefftnoise=(thefftsignal[0:thelen-2]+thefftsignal[2:thelen])/2.0
return()
def makemask(inputim,inputthresh,useabs):
if (useabs < 1):
#print "using relative threshold"
thethreshval = getfracval(inputim,inputthresh)
#print "%2.2f percent threshold at %2.2f" % (100.0*inputthresh,thethreshval)
else:
thethreshval = inputthresh
themask = sp.where(inputim > thethreshval, 1.0, 0.0)
return(themask)
def vecnorm(thevec):
return(np.sqrt(np.square(thevec).sum()))
# format limits
def formatlimits(thelimits):
limitdesc=thelimits[3]
warnmin=str(thelimits[0][0])
warnmax=str(thelimits[0][1])
failmin=str(thelimits[1][0])
failmax=str(thelimits[1][1])
return("\""+limitdesc+"\","+failmin+","+warnmin+","+warnmax+","+failmax)
# check to see if a parameter falls within preset limits.
def limitcheck(thenumber,thelimits):
retval=2 # start with the assumption that the data is bad
if((float(thenumber)>=float(thelimits[1][0])) and (float(thenumber)<=float(thelimits[1][1]))):
retval=1 # number falls within the warning limits
if((float(thenumber)>=float(thelimits[0][0])) and (float(thenumber)<=float(thelimits[0][1]))):
retval=0 # number falls within the good limits
#print thelimits[1][0],thelimits[0][0],thenumber,thelimits[0][1],thelimits[1][1],"--->",retval
return(retval)
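# Editor's note (illustrative sketch, not part of the original module): with warning
# limits (0.0, 0.5) and failure limits (0.0, 0.6), i.e.
# thelimits = ((0.0, 0.5), (0.0, 0.6), (0, 0), "example", ""),
# limitcheck returns 0 for 0.45 (good), 1 for 0.55 (warning band) and 2 for 0.70.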
# generate a table of weisskoff data
def weisstable(roiareas,weisscvs,projcvs):
theshape=roiareas.shape
numareas=theshape[0]
tablestring=tablerowtag(
tableentrytag("Region Size")+
tableentrytag("Predicted Std Dev")+
tableentrytag("Actual Std Dev")+
tableentrytag("Ratio")
)
for i in range(0,numareas):
tablestring=tablestring+tablerowtag(
tableentrytag("%d" % (roiareas[i]))+
tableentrytag("%.4f" %(projcvs[i]))+
tableentrytag("%.4f" %(weisscvs[i]))+
tableentrytag("%.4f" %(weisscvs[i]/projcvs[i])))
return(smalltag(tablepropstag(tablestring,300,"center")))
# generate the polynomial fit timecourse from the coefficients
def trendgen(thexvals,thefitcoffs):
theshape=thefitcoffs.shape
order = theshape[0]-1
#print "fitting to order "+str(order)
thepoly=thexvals
thefit=0.0*thexvals
if order>0:
for i in range(1,order+1):
#print "fitting component "+str(i)+", coff="+str(thefitcoffs[order-i])
thefit = thefit + thefitcoffs[order-i]*thepoly
thepoly = np.multiply(thepoly, thexvals)
return(thefit)
# calculate the robust range of the all voxels
def completerobust(thearray):
themin=getfracval(thearray,0.02)
themax=getfracval(thearray,0.98)
return([themin,themax])
# calculate the robust range of the non-zero voxels
def nzrobust(thearray):
themin=getnzfracval(thearray,0.02)
themax=getnzfracval(thearray,0.98)
return([themin,themax])
# calculate the min and max of the non-zero voxels
def nzminmax(thearray):
flatarray=np.ravel(thearray)
nzindices=np.nonzero(flatarray)
theflatarray = flatarray[nzindices]
themax = np.max(theflatarray)
themin = np.min(theflatarray)
return([themin,themax])
# calculate the stats of the non-zero voxels
def completestats(thearray):
themean = np.mean(thearray)
thestddev = np.std(thearray)
thevar = np.var(thearray)
themax = np.max(thearray)
themin = np.min(thearray)
theptp = np.ptp(thearray)
return([themean,thestddev,thevar,themax,themin,theptp])
# calculate the stats of the non-zero voxels
def nzstats(thearray):
flatarray=np.ravel(thearray)
nzindices=np.nonzero(flatarray)
theflatarray = flatarray[nzindices]
themean = np.mean(theflatarray)
thestddev = np.std(theflatarray)
thevar = np.var(theflatarray)
themax = np.max(theflatarray)
themin = np.min(theflatarray)
theptp = np.ptp(theflatarray)
return([themean,thestddev,thevar,themax,themin,theptp])
def showstats(thestats):
formatstring = "mean = %2.2f, stddev = %2.2f, max = %2.2f, min = %2.2f"
interpstring = (thestats[0],thestats[1],thestats[3],thestats[4])
return(formatstring % interpstring)
# calculate the mean of the non-zero voxels
def nzmean(thearray):
flatarray=np.ravel(thearray)
nzindices=np.nonzero(flatarray)
return(np.mean(flatarray[nzindices]))
# calculate the sum of an array across space
def arrayspatialsum(thearray):
return(np.sum(thearray))
# show an roi timecourse plot
def showtc(thexvals,theyvals,thelabel):
w, h = P.figaspect(0.25)
roiplot = P.figure(figsize=(w,h))
roisubplot = roiplot.add_subplot(111)
roisubplot.plot(thexvals, theyvals, 'b')
roisubplot.grid(True)
#roisubplot.axes.Subplot.set_pad(0.1)
for tick in roisubplot.xaxis.get_major_ticks():
tick.label1.set_fontsize(20)
for tick in roisubplot.yaxis.get_major_ticks():
tick.label1.set_fontsize(20)
roisubplot.set_title(thelabel,fontsize=30)
return()
# show an roi timecourse plot and a fit line
def showvals(xvecs,yvecs,legendvec,specvals,thelabel,dolegend):
numxs=len(xvecs)
numys=len(yvecs)
numlegends=len(legendvec)
numspecvals=len(specvals)
if (numxs!=numys) or (numxs!=numlegends) or (numxs!=numspecvals):
print "dimensions do not match"
exit(1)
w, h = P.figaspect(0.50)
roiplot = P.figure(figsize=(w,h))
roisubplot = roiplot.add_subplot(111)
if numys==1:
roisubplot.plot(xvecs[0], yvecs[0], specvals[0])
hold(True)
if dolegend:
legend(legendvec)
hold(False)
if numys==2:
roisubplot.plot(xvecs[0], yvecs[0], specvals[0], xvecs[1], yvecs[1], specvals[1])
hold(True)
if dolegend:
legend(legendvec)
hold(False)
if numys==3:
roisubplot.plot(xvecs[0], yvecs[0], specvals[0], xvecs[1], yvecs[1], specvals[1], xvecs[2], yvecs[2], specvals[2])
hold(True)
if dolegend:
legend(legendvec)
hold(False)
if numys==4:
roisubplot.plot(xvecs[0], yvecs[0], specvals[0], xvecs[1], yvecs[1], specvals[1], xvecs[2], yvecs[2], specvals[2], xvecs[3], yvecs[3], specvals[3])
hold(True)
if dolegend:
legend(legendvec)
hold(False)
if numys==5:
roisubplot.plot(xvecs[0], yvecs[0], specvals[0], xvecs[1], yvecs[1], specvals[1], xvecs[2], yvecs[2], specvals[2], xvecs[3], yvecs[3], specvals[3], xvecs[4], yvecs[4], specvals[4])
hold(True)
if dolegend:
legend(legendvec)
hold(False)
if numys==6:
roisubplot.plot(xvecs[0], yvecs[0], specvals[0], xvecs[1], yvecs[1], specvals[1], xvecs[2], yvecs[2], specvals[2], xvecs[3], yvecs[3], specvals[3], xvecs[4], yvecs[4], specvals[4], xvecs[5], yvecs[5], specvals[5])
hold(True)
if dolegend:
legend(legendvec)
hold(False)
roisubplot.grid(True)
for tick in roisubplot.xaxis.get_major_ticks():
tick.label1.set_fontsize(20)
for tick in roisubplot.yaxis.get_major_ticks():
tick.label1.set_fontsize(20)
roisubplot.set_title(thelabel,fontsize=30)
return()
# show an roi timecourse plot and a fit line
def showtc2(thexvals,theyvals,thefitvals,thelabel):
w, h = P.figaspect(0.25)
roiplot = P.figure(figsize=(w,h))
roisubplot = roiplot.add_subplot(111)
roisubplot.plot(thexvals, theyvals, 'b', thexvals, thefitvals, 'g')
roisubplot.grid(True)
#roisubplot.axes.Subplot.set_pad(0.1)
for tick in roisubplot.xaxis.get_major_ticks():
tick.label1.set_fontsize(20)
for tick in roisubplot.yaxis.get_major_ticks():
tick.label1.set_fontsize(20)
roisubplot.set_title(thelabel,fontsize=30)
return()
# initialize and show a loglog Weiskoff plot
def showweisskoff(theareas,thestddevs,theprojstddevs,thelabel):
w, h = P.figaspect(1.0)
roiplot = P.figure(figsize=(w,h))
roiplot.subplots_adjust(hspace=0.35)
roisubplot = roiplot.add_subplot(111)
thestddevs=thestddevs+0.00000001
roisubplot.loglog(theareas, thestddevs, 'r', theareas, theprojstddevs, 'k', basex=10)
roisubplot.grid(True)
#roiplot.title(thelabel)
return()
# initialize and show a 2D slice from a dataset in greyscale
def showslice2(thedata,thelabel,minval,maxval,colormap):
theshape=thedata.shape
numslices=theshape[0]
ysize=theshape[1]
xsize=theshape[2]
slicesqrt=int(np.ceil(np.sqrt(numslices)))
theslice=np.zeros((ysize*slicesqrt,xsize*slicesqrt))
for i in range(0,numslices):
ypos=int(i/slicesqrt)*ysize
xpos=int(i%slicesqrt)*xsize
theslice[ypos:ypos+ysize,xpos:xpos+xsize]=thedata[i,:,:]
if P.isinteractive():
P.ioff()
P.axis('off')
P.axis('equal')
P.subplots_adjust(hspace=0.0)
P.axes([0,0,1,1], frameon = False)
if (colormap==0):
thecmap=P.cm.gray
else:
mycmdata1 = {
'red' : ((0., 0., 0.), (0.5, 1.0, 0.0), (1., 1., 1.)),
'green': ((0., 0., 0.), (0.5, 1.0, 1.0), (1., 0., 0.)),
'blue' : ((0., 0., 0.), (0.5, 1.0, 0.0), (1., 0., 0.))
}
thecmap = P.matplotlib.colors.LinearSegmentedColormap('mycm', mycmdata1)
#thecmap=P.cm.spectral
theimptr = P.imshow(theslice, vmin=minval, vmax=maxval, interpolation='nearest', label=thelabel, aspect='equal', cmap=thecmap)
#P.colorbar()
return()
# initialize and show a 2D slice from a dataset in greyscale
def showslice3(thedata,thelabel,minval,maxval,colormap):
theshape=thedata.shape
ysize=theshape[0]
xsize=theshape[1]
theslice=np.zeros((ysize,xsize))
if P.isinteractive():
P.ioff()
P.axis('off')
P.axis('equal')
P.subplots_adjust(hspace=0.0)
P.axes([0,0,1,1], frameon = False)
if (colormap==0):
thecmap=P.cm.gray
else:
mycmdata1 = {
'red' : ((0., 0., 0.), (0.5, 1.0, 0.0), (1., 1., 1.)),
'green': ((0., 0., 0.), (0.5, 1.0, 1.0), (1., 0., 0.)),
'blue' : ((0., 0., 0.), (0.5, 1.0, 0.0), (1., 0., 0.))
}
thecmap = P.matplotlib.colors.LinearSegmentedColormap('mycm', mycmdata1)
#thecmap=P.cm.spectral
theimptr = P.imshow(thedata, vmin=minval, vmax=maxval, interpolation='nearest', label=thelabel, aspect='equal', cmap=thecmap)
#P.colorbar()
return()
# show a 2D slice from a dataset in greyscale
def showslice(theslice):
if P.isinteractive():
P.ioff()
P.axis('off')
P.axis('equal')
P.axis('tight')
P.imshow(theslice, interpolation='nearest', aspect='equal', cmap=P.cm.gray)
P.colorbar()
return()
def smooth(x,window_len=11,window='hanning'):
# this routine comes from a scipy.org Cookbook
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) at both ends so that transient parts are minimized
at the beginning and end of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t=np.arange(-2,2,0.1)
x=np.sin(t)+np.random.randn(len(t))*0.1
y=smooth(x)
see also:
np.hanning, np.hamming, np.bartlett, np.blackman, np.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
"""
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
if x.size < window_len:
raise ValueError, "Input vector needs to be bigger than window size."
if window_len<3:
return x
if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
s=np.r_[2*x[0]-x[window_len:1:-1],x,2*x[-1]-x[-1:-window_len:-1]]
#print(len(s))
if window == 'flat': #moving average
w=np.ones(window_len,'d')
else:
w=getattr(np, window)(window_len)
y=np.convolve(w/w.sum(),s,mode='same')
return y[window_len-1:-window_len+1]
# Find the image intensity value that cleanly separates background from image
def findsepval(datamat):
numbins=200
themax = datamat.max()
themin = datamat.min()
(meanhist,bins) = np.histogram(datamat,bins=numbins,range=(themin,themax))
smoothhist = smooth(meanhist)
currentpos = int(numbins*0.05)
minval=smoothhist[currentpos]
for i in range(currentpos+1,numbins):
if(smoothhist[i] < smoothhist[currentpos]):
currentpos=i
if(smoothhist[i] > 1.2 * smoothhist[currentpos]):
break
cummeanhist = np.cumsum(meanhist)
#print "curpos %d, cummeanhist[curpos] %2.2f, cummeanhist[numbins-1] %d" % (currentpos, cummeanhist[currentpos], cummeanhist[numbins-1])
cummeanhist[currentpos]
cumfrac=(1.0*cummeanhist[currentpos])/(1.0*cummeanhist[numbins-1])
sepval=bins[currentpos]
return([sepval,cumfrac])
# Find the image intensity value which thefrac of the non-zero voxels in the image exceed
def getfracval(datamat,thefrac):
numbins=200
themax = datamat.max()
themin = datamat.min()
(meanhist,bins) = np.histogram(datamat,bins=numbins,range=(themin,themax))
cummeanhist = np.cumsum(meanhist)
target = cummeanhist[numbins-1]*thefrac
for i in range(0,numbins):
if cummeanhist[i]>=target:
return(bins[i])
return(0.0)
# Find the image intensity value which thefrac of the non-zero voxels in the image exceed
def getnzfracval(datamat,thefrac):
numbins=200
(themin,themax) = nzminmax(datamat)
(meanhist,bins) = np.histogram(datamat,bins=numbins,range=(themin,themax))
cummeanhist = np.cumsum(meanhist)
target = cummeanhist[numbins-1]*thefrac
for i in range(0,numbins):
if cummeanhist[i]>=target:
return(bins[i])
return(0.0)
# find the center of mass of a 2D or 3D image
def findCOM(datamat):
Mx = 0.0
My = 0.0
Mz = 0.0
mass = 0.0
val = 0.0
arrdims=np.shape(datamat)
if datamat.ndim==2:
for i in range(0,arrdims[0]):
for j in range(0,arrdims[1]):
val = datamat[i,j]
My += (i * val)
Mx += (j * val)
mass += val
COM = (Mx/mass , My/mass, 0.0)
if datamat.ndim==3:
for i in range(0,arrdims[0]):
for j in range(0,arrdims[1]):
for k in range(0,arrdims[2]):
val = datamat[i,j,k]
Mz += (i * val)
My += (j * val)
Mx += (k * val)
mass += val
COM = (Mx/mass , My/mass, Mz/mass)
return COM
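# Editor's note (illustrative cross-check, not part of the original module): for a
# 2D image the same center of mass can be computed without explicit loops, e.g.
# ygrid, xgrid = np.indices(np.shape(datamat))
# com = ((xgrid * datamat).sum() / datamat.sum(), (ygrid * datamat).sum() / datamat.sum(), 0.0)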
# given an roi and a position, mark an roi
def markroi(theinputroi,zpos,roislice,theval):
xstart=theinputroi[0][0]
xend=theinputroi[1][0]
ystart=theinputroi[0][1]
yend=theinputroi[1][1]
roislice[zpos,ystart:yend,xstart:xend]=theval
return
# given a location and a size, define the corners of an roi
def setroilims(xpos,ypos,size):
if (size%2)==0:
halfsize=size/2
return(((int(round(xpos-halfsize)),int(round(ypos-halfsize))),
(int(round(xpos+halfsize)),int(round(ypos+halfsize)))))
else:
halfsize=(size-1)/2
return(((int(round(xpos-halfsize)),int(round(ypos-halfsize))),
(int(round(xpos+halfsize+1)),int(round(ypos+halfsize+1)))))
# get an snr timecourse from the voxels of an roi
def getroisnr(theimage,theroi,zpos):
xstart=theroi[0][0]
xend=theroi[1][0]
ystart=theroi[0][1]
yend=theroi[1][1]
thesubreg=theimage[:,zpos,ystart:yend,xstart:xend]
theshape=thesubreg.shape
numtimepoints=theshape[0]
themeans=np.zeros(numtimepoints)
thestddevs=np.zeros(numtimepoints)
themax=np.zeros(numtimepoints)
themin=np.zeros(numtimepoints)
thesnrs=np.zeros(numtimepoints)
timeindex=np.arange(0,numtimepoints)
for i in timeindex:
themeans[i]=np.mean(np.ravel(thesubreg[i,:,:]))
thestddevs[i]=np.std(np.ravel(thesubreg[i,:,:]))
themax[i]=np.max(np.ravel(thesubreg[i,:,:]))
themin[i]=np.min(np.ravel(thesubreg[i,:,:]))
thesnrs[i]=themeans[i]/thestddevs[i]
return(thesnrs)
# get all the voxels from an roi and return a 2d (time by space) array
def getroivoxels(theimage,theroi,zpos):
xstart=theroi[0][0]
xend=theroi[1][0]
ystart=theroi[0][1]
yend=theroi[1][1]
thesubreg=theimage[:,zpos,ystart:yend,xstart:xend]
theshape=thesubreg.shape
numtimepoints=theshape[0]
thevoxels=np.zeros((numtimepoints,theshape[1]*theshape[2]))
timeindex=np.arange(0,numtimepoints)
for i in timeindex:
thevoxels[i,:]=np.ravel(thesubreg[i,:,:])
return(thevoxels)
# get a standard deviation timecourse from the voxels of an roi
def getroistdtc(theimage,theroi,zpos):
xstart=theroi[0][0]
xend=theroi[1][0]
ystart=theroi[0][1]
yend=theroi[1][1]
thesubreg=theimage[:,zpos,ystart:yend,xstart:xend]
theshape=thesubreg.shape
numtimepoints=theshape[0]
thestds=np.zeros(numtimepoints)
timeindex=np.arange(0,numtimepoints)
for i in timeindex:
thestds[i]=np.std(np.ravel(thesubreg[i,:,:]))
return(thestds)
# get an average timecourse from the voxels of an roi
def getroimeantc(theimage,theroi,zpos):
xstart=theroi[0][0]
xend=theroi[1][0]
ystart=theroi[0][1]
yend=theroi[1][1]
thesubreg=theimage[:,zpos,ystart:yend,xstart:xend]
theshape=thesubreg.shape
numtimepoints=theshape[0]
themeans=np.zeros(numtimepoints)
timeindex=np.arange(0,numtimepoints)
for i in timeindex:
themeans[i]=np.mean(np.ravel(thesubreg[i,:,:]))
return(themeans)
# get the average value from an roi in a 3D image
def getroival(theimage,theroi,zpos):
xstart=theroi[0][0]
xend=theroi[1][0]
ystart=theroi[0][1]
yend=theroi[1][1]
theroival=np.mean(theimage[zpos,ystart:yend,xstart:xend])
return(theroival)
# make a captioned image with statistics
def makecaptionedimage(imagetitle,thestats,imagename,thewidth):
if(thestats==[]):
imcapstring = paratag(boldtag(imagetitle))
else:
imcapstring = paratag(boldtag(imagetitle) + breaktag(showstats(thestats)))
return(imcapstring + imagetag(imagename,thewidth))
# send a command to the shell and return the last line of its output
def doashellcmd(cmd):
    a = os.popen(cmd)
    retval = ''
    while 1:
        line = a.readline()
        if not line: break
        retval = line[:-1]
    return retval
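# Hedged sketch (not part of the original module): a subprocess-based variant
# of doashellcmd that avoids os.popen and returns an empty string when the
# command produces no output. The function name is an assumption.
def _doashellcmd_subprocess(cmd):
    import subprocess
    output = subprocess.check_output(cmd, shell=True, universal_newlines=True)
    lines = output.splitlines()
    return lines[-1] if lines else ''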
|
bsd-3-clause
|
shangwuhencc/scikit-learn
|
sklearn/preprocessing/tests/test_imputation.py
|
213
|
11911
|
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
    # test starts failing after an update of scipy, Imputer will need to be
    # updated to be consistent with the new (correct) behaviour.
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
|
bsd-3-clause
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/pandas/tools/hashing.py
|
7
|
4609
|
"""
data hash pandas / numpy objects
"""
import numpy as np
from pandas import _hash, Series, factorize, Categorical, Index
from pandas.lib import infer_dtype
from pandas.types.generic import ABCIndexClass, ABCSeries, ABCDataFrame
from pandas.types.common import is_categorical_dtype
# 16 byte long hashing key
_default_hash_key = '0123456789123456'
def hash_pandas_object(obj, index=True, encoding='utf8', hash_key=None):
"""
Return a data hash of the Index/Series/DataFrame
.. versionadded:: 0.19.2
Parameters
----------
index : boolean, default True
include the index in the hash (if Series/DataFrame)
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
Returns
-------
Series of uint64, same length as the object
"""
if hash_key is None:
hash_key = _default_hash_key
def adder(h, hashed_to_add):
h = np.multiply(h, np.uint(3), h)
return np.add(h, hashed_to_add, h)
if isinstance(obj, ABCIndexClass):
h = hash_array(obj.values, encoding, hash_key).astype('uint64')
h = Series(h, index=obj, dtype='uint64')
elif isinstance(obj, ABCSeries):
h = hash_array(obj.values, encoding, hash_key).astype('uint64')
if index:
h = adder(h, hash_pandas_object(obj.index,
index=False,
encoding=encoding,
hash_key=hash_key).values)
h = Series(h, index=obj.index, dtype='uint64')
elif isinstance(obj, ABCDataFrame):
cols = obj.iteritems()
first_series = next(cols)[1]
h = hash_array(first_series.values, encoding,
hash_key).astype('uint64')
for _, col in cols:
h = adder(h, hash_array(col.values, encoding, hash_key))
if index:
h = adder(h, hash_pandas_object(obj.index,
index=False,
encoding=encoding,
hash_key=hash_key).values)
h = Series(h, index=obj.index, dtype='uint64')
else:
raise TypeError("Unexpected type for hashing %s" % type(obj))
return h
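# Hedged usage sketch (not part of the original module): hashing a small
# Series with and without its index using the function defined above. The
# helper name and example values are assumptions for illustration only.
def _hash_pandas_object_sketch():
    s = Series([1.0, 2.0, 3.0])
    with_index = hash_pandas_object(s)                 # uint64 Series, same length as s
    values_only = hash_pandas_object(s, index=False)   # ignore the index
    return with_index, values_only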
def hash_array(vals, encoding='utf8', hash_key=None):
"""
Given a 1d array, return an array of deterministic integers.
.. versionadded:: 0.19.2
Parameters
----------
vals : ndarray
encoding : string, default 'utf8'
encoding for data & key when strings
hash_key : string key to encode, default to _default_hash_key
Returns
-------
1d uint64 numpy array of hash values, same length as the vals
"""
    # work with categoricals as ints (this check is above the complex
    # check so that we don't ask numpy if categorical is a subdtype of
    # complex, as it will choke).
if hash_key is None:
hash_key = _default_hash_key
if is_categorical_dtype(vals.dtype):
vals = vals.codes
# we'll be working with everything as 64-bit values, so handle this
# 128-bit value early
if np.issubdtype(vals.dtype, np.complex128):
return hash_array(vals.real) + 23 * hash_array(vals.imag)
# MAIN LOGIC:
inferred = infer_dtype(vals)
# First, turn whatever array this is into unsigned 64-bit ints, if we can
# manage it.
if inferred == 'boolean':
vals = vals.astype('u8')
if (np.issubdtype(vals.dtype, np.datetime64) or
np.issubdtype(vals.dtype, np.timedelta64) or
np.issubdtype(vals.dtype, np.number)) and vals.dtype.itemsize <= 8:
vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
else:
# its MUCH faster to categorize object dtypes, then hash and rename
codes, categories = factorize(vals, sort=False)
categories = Index(categories)
c = Series(Categorical(codes, categories,
ordered=False, fastpath=True))
vals = _hash.hash_object_array(categories.values,
hash_key,
encoding)
# rename & extract
vals = c.cat.rename_categories(Index(vals)).astype(np.uint64).values
# Then, redistribute these 64-bit ints within the space of 64-bit ints
vals ^= vals >> 30
vals *= np.uint64(0xbf58476d1ce4e5b9)
vals ^= vals >> 27
vals *= np.uint64(0x94d049bb133111eb)
vals ^= vals >> 31
return vals
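# Hedged usage sketch (not part of the original module): hash_array on a plain
# int64 ndarray (hashed via the bit-mixing finalizer above) and on an object
# array of strings (hashed via factorize + hash_object_array). The helper name
# and example values are assumptions for illustration only.
def _hash_array_sketch():
    ints = np.arange(5, dtype='int64')
    strs = np.array(['a', 'b', 'c'], dtype=object)
    return hash_array(ints), hash_array(strs, encoding='utf8')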
|
mit
|
CforED/Machine-Learning
|
benchmarks/bench_sgd_regression.py
|
283
|
5569
|
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
|
bsd-3-clause
|
tomhunter-gh/Lean
|
Algorithm.Python/PythonPackageTestAlgorithm.py
|
2
|
6403
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
# Libraries included with basic python install
from bisect import bisect
import cmath
import collections
import copy
import functools
import heapq
import itertools
import math
import operator
import pytz
import Queue
import re
import time
import zlib
# Third party libraries added with pip
from sklearn.ensemble import RandomForestClassifier
import blaze # includes sqlalchemy, odo
import numpy
import scipy
import cvxopt
import cvxpy
from pykalman import KalmanFilter
import statsmodels.api as sm
import talib
from copulalib.copulalib import Copula
import theano
import xgboost
from arch import arch_model
from keras.models import Sequential
from keras.layers import Dense, Activation
import tensorflow as tf
class PythonPackageTestAlgorithm(QCAlgorithm):
'''Algorithm to test third party libraries'''
def Initialize(self):
self.SetStartDate(2013, 10, 7) #Set Start Date
        self.SetEndDate(2013, 10, 7) #Set End Date
self.AddEquity("SPY", Resolution.Daily)
# numpy test
print "numpy test >>> print numpy.pi: " , numpy.pi
# scipy test:
print "scipy test >>> print mean of 1 2 3 4 5:", scipy.mean(numpy.array([1, 2, 3, 4, 5]))
#sklearn test
print "sklearn test >>> default RandomForestClassifier:", RandomForestClassifier()
# cvxopt matrix test
print "cvxopt >>>", cvxopt.matrix([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], (2,3))
# talib test
print "talib test >>>", talib.SMA(numpy.random.random(100))
# blaze test
blaze_test()
# cvxpy test
cvxpy_test()
# statsmodels test
statsmodels_test()
# pykalman test
pykalman_test()
# copulalib test
copulalib_test()
# theano test
theano_test()
# xgboost test
xgboost_test()
# arch test
arch_test()
# keras test
keras_test()
# tensorflow test
tensorflow_test()
def OnData(self, data): pass
def blaze_test():
accounts = blaze.symbol('accounts', 'var * {id: int, name: string, amount: int}')
deadbeats = accounts[accounts.amount < 0].name
L = [[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]]
print "blaze test >>>", list(blaze.compute(deadbeats, L))
def grade(score, breakpoints=[60, 70, 80, 90], grades='FDCBA'):
i = bisect(breakpoints, score)
return grades[i]
def cvxpy_test():
numpy.random.seed(1)
n = 10
mu = numpy.abs(numpy.random.randn(n, 1))
Sigma = numpy.random.randn(n, n)
Sigma = Sigma.T.dot(Sigma)
w = cvxpy.Variable(n)
gamma = cvxpy.Parameter(sign='positive')
ret = mu.T*w
risk = cvxpy.quad_form(w, Sigma)
print "csvpy test >>> ", cvxpy.Problem(cvxpy.Maximize(ret - gamma*risk),
[cvxpy.sum_entries(w) == 1,
w >= 0])
def statsmodels_test():
nsample = 100
x = numpy.linspace(0, 10, 100)
X = numpy.column_stack((x, x**2))
beta = numpy.array([1, 0.1, 10])
e = numpy.random.normal(size=nsample)
X = sm.add_constant(X)
y = numpy.dot(X, beta) + e
model = sm.OLS(y, X)
results = model.fit()
print "statsmodels tests >>>", results.summary()
def pykalman_test():
kf = KalmanFilter(transition_matrices = [[1, 1], [0, 1]], observation_matrices = [[0.1, 0.5], [-0.3, 0.0]])
measurements = numpy.asarray([[1,0], [0,0], [0,1]]) # 3 observations
kf = kf.em(measurements, n_iter=5)
print "pykalman test >>>", kf.filter(measurements)
def copulalib_test():
x = numpy.random.normal(size=100)
y = 2.5 * x + numpy.random.normal(size=100)
    # Make an instance of the Copula class with x, y and the clayton family
print "copulalib test >>>", Copula(x, y, family='clayton')
def theano_test():
a = theano.tensor.vector() # declare variable
out = a + a ** 10 # build symbolic expression
f = theano.function([a], out) # compile function
print "theano test >>>", f([0, 1, 2])
def xgboost_test():
data = numpy.random.rand(5,10) # 5 entities, each contains 10 features
label = numpy.random.randint(2, size=5) # binary target
print "xgboost test >>>", xgboost.DMatrix( data, label=label)
def arch_test():
r = numpy.array([0.945532630498276,
0.614772790142383,
0.834417758890680,
0.862344782601800,
0.555858715401929,
0.641058419842652,
0.720118656981704,
0.643948007732270,
0.138790608092353,
0.279264178231250,
0.993836948076485,
0.531967023876420,
0.964455754192395,
0.873171802181126,
0.937828816793698])
garch11 = arch_model(r, p=1, q=1)
res = garch11.fit(update_freq=10)
print "arch test >>>", res.summary()
def keras_test():
# Initialize the constructor
model = Sequential()
# Add an input layer
model.add(Dense(12, activation='relu', input_shape=(11,)))
# Add one hidden layer
model.add(Dense(8, activation='relu'))
# Add an output layer
model.add(Dense(1, activation='sigmoid'))
print "keras test >>>", model
def tensorflow_test():
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0) # also tf.float32 implicitly
sess = tf.Session()
node3 = tf.add(node1, node2)
print "tensorflow test >>>", "sess.run(node3): ", sess.run(node3)
|
apache-2.0
|
revan/facerecserver
|
visual.py
|
1
|
2936
|
from normalization import minmax
import os as os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import PIL.Image as Image
import math as math
def create_font(fontname='Tahoma', fontsize=10):
return { 'fontname': fontname, 'fontsize':fontsize }
def plot_gray(X, sz=None, filename=None):
if not sz is None:
X = X.reshape(sz)
    X = minmax(X, 0, 255)
    fig = plt.figure()
    implot = plt.imshow(np.asarray(X), cmap=cm.gray)
if filename is None:
plt.show()
else:
fig.savefig(filename, format="png", transparent=False)
def plot_eigenvectors(eigenvectors, num_components, sz, filename=None, start_component=0, rows = None, cols = None, title="Subplot", color=True):
if (rows is None) or (cols is None):
rows = cols = int(math.ceil(np.sqrt(num_components)))
    num_components = min(num_components, eigenvectors.shape[1])
fig = plt.figure()
for i in range(start_component, num_components):
vi = eigenvectors[0:,i].copy()
vi = minmax(np.asarray(vi), 0, 255, dtype=np.uint8)
vi = vi.reshape(sz)
ax0 = fig.add_subplot(rows,cols,(i-start_component)+1)
plt.setp(ax0.get_xticklabels(), visible=False)
plt.setp(ax0.get_yticklabels(), visible=False)
plt.title("%s #%d" % (title, i), create_font('Tahoma',10))
if color:
implot = plt.imshow(np.asarray(vi))
else:
            implot = plt.imshow(np.asarray(vi), cmap=cm.gray)
if filename is None:
fig.show()
else:
fig.savefig(filename, format="png", transparent=False)
def subplot(title, images, rows, cols, sptitle="subplot", sptitles=[], colormap=cm.gray, ticks_visible=True, filename=None):
fig = plt.figure()
# main title
fig.text(.5, .95, title, horizontalalignment='center')
for i in xrange(len(images)):
ax0 = fig.add_subplot(rows,cols,(i+1))
plt.setp(ax0.get_xticklabels(), visible=False)
plt.setp(ax0.get_yticklabels(), visible=False)
if len(sptitles) == len(images):
plt.title("%s #%s" % (sptitle, str(sptitles[i])), create_font('Tahoma',10))
else:
plt.title("%s #%d" % (sptitle, (i+1)), create_font('Tahoma',10))
plt.imshow(np.asarray(images[i]), cmap=colormap)
if filename is None:
plt.show()
else:
fig.savefig(filename)
# using plt plot:
#filename="/home/philipp/facerec/at_database_vs_accuracy_xy.png"
#t = np.arange(2., 10., 1.)
#fig = plt.figure()
#plt.plot(t, r0, 'k--', t, r1, 'k')
#plt.legend(("Eigenfaces", "Fisherfaces"), 'lower right', shadow=True, fancybox=True)
#plt.ylim(0,1)
#plt.ylabel('Recognition Rate')
#plt.xlabel('Database Size (Images per Person)')
#fig.savefig(filename, format="png", transparent=False)
#plt.show()
|
bsd-3-clause
|
feranick/GES_AT
|
Other/Cursors/cursors2.py
|
1
|
2307
|
# -*- noplot -*-
"""
This example shows how to use matplotlib to provide a data cursor. It
uses matplotlib to draw the cursor and may be slow since this
requires redrawing the figure with every mouse move.
Faster cursoring is possible using native GUI drawing, as in
wxcursor_demo.py.
The mpldatacursor and mplcursors third-party packages can be used to achieve a
similar effect. See
https://github.com/joferkington/mpldatacursor
https://github.com/anntzer/mplcursors
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
class Cursor(object):
def __init__(self, ax):
self.ax = ax
self.lx = ax.axhline(color='k') # the horiz line
self.ly = ax.axvline(color='k') # the vert line
# text location in axes coords
self.txt = ax.text(0.7, 0.9, '', transform=ax.transAxes)
def mouse_move(self, event):
if not event.inaxes:
return
x, y = event.xdata, event.ydata
# update the line positions
self.lx.set_ydata(y)
self.ly.set_xdata(x)
self.txt.set_text('x=%1.2f, y=%1.2f' % (x, y))
plt.draw()
class SnaptoCursor(object):
"""
Like Cursor but the crosshair snaps to the nearest x,y point
For simplicity, I'm assuming x is sorted
"""
def __init__(self, ax, x, y):
self.ax = ax
self.lx = ax.axhline(color='k') # the horiz line
self.ly = ax.axvline(color='k') # the vert line
self.x = x
self.y = y
# text location in axes coords
self.txt = ax.text(0.7, 0.9, '', transform=ax.transAxes)
def mouse_move(self, event):
if not event.inaxes:
return
x, y = event.xdata, event.ydata
indx = np.searchsorted(self.x, [x])[0]
x = self.x[indx]
y = self.y[indx]
# update the line positions
self.lx.set_ydata(y)
self.ly.set_xdata(x)
self.txt.set_text('x=%1.2f, y=%1.2f' % (x, y))
print('x=%1.2f, y=%1.2f' % (x, y))
plt.draw()
t = np.arange(0.0, 1.0, 0.01)
s = np.sin(2*2*np.pi*t)
fig, ax = plt.subplots()
#cursor = Cursor(ax)
cursor = SnaptoCursor(ax, t, s)
plt.connect('motion_notify_event', cursor.mouse_move)
ax.plot(t, s, 'o')
plt.axis([0, 1, -1, 1])
plt.show()
|
gpl-3.0
|
averagehat/samtools
|
bqd.py
|
2
|
6177
|
#!/usr/bin/env python
from subprocess import Popen, PIPE
import sys
import json
from collections import namedtuple
from builtins import zip
from matplotlib.lines import Line2D
# Alias our region strings
G = 'Gap'
N = 'Normal'
LC = 'LowCoverage'
LQ = 'LowQuality'
LCQ = 'LowCovQual'
# As a list
REGIONTYPES = [
G, N, LC, LQ, LCQ
]
def mpileup( bamfile, regionstr=None ):
'''
.. todo::
This needs to go as samtools.mpileup exists
'''
cmd = ['samtools','mpileup', '-d', '100000']
if regionstr:
cmd += ['-r',regionstr]
cmd.append( bamfile )
p = Popen( cmd, stdout=PIPE )
return p.stdout
def parse_pileup( pileup ):
'''
.. todo::
This needs to be replaced by samtools stuff
Parses the raw pileup output from samtools mpileup and returns a dictionary
with stats for every reference in the pileup
- maxd/mind - max/min depth found for that reference
- maxq/minq - max/min quality found for that reference
- depths - depth at each base position
- avgquals - average quality at each base position
- length - length of reference
@pileup - file like object that returns lines from samtools mpileup
@returns dictionary {'ref1': {maxd:0,mind:0,maxq:0,minq:0,depths:[],avgquals:[],length:0}, 'ref2':...}
'''
refs = {}
lastpos = {}
for line in pileup:
info = line.rstrip().split('\t')
# Depth is 0
if len( info ) == 4:
ref, pos, n, depth = info
seq = ''
quals = ''
elif len( info ) == 6:
ref,pos,n,depth,seq,quals = info
else:
raise ValueError( "mpileup line {0} is unparseable".format(line) )
# Initialize new reference
if ref not in refs:
lastpos[ref] = 0
refs[ref] = {
'maxd': 0,
'mind': 8000,
'maxq': 0,
'minq': 99,
'depths': [],
'avgquals': [],
'length': 0
}
pos = int(pos)
# Fill in gaps with blanks
if lastpos[ref] != pos-1:
refs[ref]['mind'] = 0
refs[ref]['minq'] = 0.0
# From the position 1 past the last
# all the way up to the current
for i in range( lastpos[ref]+1, pos ):
refs[ref]['depths'].append( 0 )
refs[ref]['avgquals'].append( 0.0 )
refs[ref]['length'] += 1
depth = int(depth)
refs[ref]['maxd'] = max( refs[ref]['maxd'], depth )
refs[ref]['mind'] = min( refs[ref]['mind'], depth )
sum = 0
for o in quals:
q = float( ord(o)-33 )
refs[ref]['maxq'] = max( refs[ref]['maxq'], q )
refs[ref]['minq'] = min( refs[ref]['minq'], q )
sum += q
if depth != 0:
refs[ref]['avgquals'].append( sum / depth )
else:
refs[ref]['avgquals'].append( 0 )
refs[ref]['depths'].append( depth )
refs[ref]['length'] += 1
lastpos[ref] = pos
return refs
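# Hedged usage sketch (not part of the original module): parse_pileup only
# needs an iterable of mpileup-formatted lines, so a small in-memory list is
# enough here. The reference name, bases and qualities are made up.
def _parse_pileup_sketch():
    fake_pileup = [
        'ref1\t1\tA\t3\tAAA\tIII',   # depth 3, each quality ord('I') - 33 == 40
        'ref1\t2\tC\t0',             # zero-depth line (only 4 columns)
        'ref1\t3\tG\t2\tGG\t!!',     # depth 2, each quality ord('!') - 33 == 0
    ]
    stats = parse_pileup(fake_pileup)
    # dict with maxd/mind/maxq/minq/depths/avgquals/length for 'ref1'
    return stats['ref1']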
# Named tuple to store each region in
CoverageRegion = namedtuple('CoverageRegion', ['start','end','type'])
def get_region_type(depth, qual, gap, lowqual, lowcov):
'''
Return the region type for the given depth and quality combination
'''
# Check for gap
if depth <= gap:
return G
# Check for lowcovqual
elif depth < lowcov and qual < lowqual:
return LCQ
# Check for lowcov
elif depth < lowcov:
return LC
elif qual < lowqual:
return LQ
else:
return N
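# Hedged sketch (not part of the original module): how a few depth/quality
# pairs are classified. The thresholds gap=0, lowqual=25, lowcov=10 are
# illustrative assumptions, not project defaults.
def _region_type_examples():
    thresholds = dict(gap=0, lowqual=25, lowcov=10)
    return [
        get_region_type(0, 40, **thresholds),    # 'Gap'        (depth <= gap)
        get_region_type(5, 10, **thresholds),    # 'LowCovQual' (low depth and low quality)
        get_region_type(5, 30, **thresholds),    # 'LowCoverage'
        get_region_type(50, 10, **thresholds),   # 'LowQuality'
        get_region_type(50, 40, **thresholds),   # 'Normal'
    ]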
def regions_from_qualdepth(qualdepth, gap, lowqual, lowcov):
'''
Turn qualdepth into a generator of regions
Each region returned will be a namedtuple with start, end, type filled out
Types are of the following:
'Gap'
'Normal'
'LowCoverage'
'LowQuality'
'LowCovQual'
qualdepth is a dictionary with the following keys
depths(list)
avgquals(list)
minq(float)
maxq(float)
mind(int)
maxd(int)
length(int)
mapped_reads(int)
lowqual and lowcov define the minimum requirements to be called that
    type of region. The comparison is non-inclusive (i.e. a value < lowcov is
    called LowCoverage); Gap is similar, except its comparison is inclusive
    since the depth could be 0
'''
# The current region we are working on
curregion = [0,0,'']
# Loop through depth and avgqualities together
for basepos, da in enumerate(zip(qualdepth['depths'], qualdepth['avgquals']),start=1):
# Split up the tuple
depth, avgqual = da
# Will hold current region type
regtype = get_region_type(depth, avgqual, gap, lowqual, lowcov)
# For the very first baseposition
if basepos == 1:
curregion = [basepos,basepos,regtype]
# If the region type has changed and not the first base
# then yield the current region
elif regtype != curregion[2]:
# End the current region
curregion[1] = basepos
# Yield the built named tuple
yield CoverageRegion._make(curregion)
# Start a new region now
curregion = [basepos,basepos,regtype]
else:
curregion[1] = basepos
# Increment end by 1 to include the end
curregion[1] += 1
# Yield the last region we are on
yield CoverageRegion._make(curregion)
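# Hedged usage sketch (not part of the original module): regions_from_qualdepth
# only reads the 'depths' and 'avgquals' keys, so a minimal dictionary is
# enough here. The depths, qualities and thresholds are assumptions.
def _regions_sketch():
    qualdepth = {
        'depths':   [0, 0, 5, 50, 50],
        'avgquals': [0, 0, 30, 10, 40],
    }
    # yields CoverageRegion tuples covering Gap, LowCoverage, LowQuality and
    # Normal stretches for these thresholds
    return list(regions_from_qualdepth(qualdepth, gap=0, lowqual=25, lowcov=10))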
def lines2d_from_regions(yval, regions, line2dargs):
'''
Create Line2D's for each region using the start and stop values
for the x1,x2 values and yval for y1 and y2
    line2dargs is a dictionary mapping each region.type to a dict of keyword
    arguments that is passed straight through to Line2D, so each region type
    can be styled (for example colored) differently
Returns a generator of Line2D objects
'''
for region in regions:
yield Line2D([region.start,region.end], [yval,yval], **line2dargs[region.type])
|
gpl-2.0
|
RachitKansal/scikit-learn
|
sklearn/ensemble/partial_dependence.py
|
251
|
15097
|
"""Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
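# Hedged sketch (not part of scikit-learn): what _grid_from_X produces for a
# tiny input. With fewer unique values than grid_resolution, each axis is just
# that column's unique values and the grid is their cartesian product. The
# helper name and example values are assumptions for illustration only.
def _grid_from_X_sketch():
    X = np.array([[1., 10.], [2., 20.], [3., 30.]])
    grid, axes = _grid_from_X(X, grid_resolution=100)
    # axes == [array([1., 2., 3.]), array([10., 20., 30.])]; grid.shape == (9, 2)
    return grid, axes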
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
|
bsd-3-clause
|
madphysicist/numpy
|
doc/source/conf.py
|
1
|
13666
|
# -*- coding: utf-8 -*-
import os
import re
import sys
# Minimum version, enforced by sphinx
needs_sphinx = '3.2.0'
# This is a nasty hack to use platform-agnostic names for types in the
# documentation.
# must be kept alive to hold the patched names
_name_cache = {}
def replace_scalar_type_names():
""" Rename numpy types to use the canonical names to make sphinx behave """
import ctypes
Py_ssize_t = ctypes.c_int64 if ctypes.sizeof(ctypes.c_void_p) == 8 else ctypes.c_int32
class PyObject(ctypes.Structure):
pass
class PyTypeObject(ctypes.Structure):
pass
PyObject._fields_ = [
('ob_refcnt', Py_ssize_t),
('ob_type', ctypes.POINTER(PyTypeObject)),
]
PyTypeObject._fields_ = [
# varhead
('ob_base', PyObject),
('ob_size', Py_ssize_t),
# declaration
('tp_name', ctypes.c_char_p),
]
# prevent numpy attaching docstrings to the scalar types
assert 'numpy.core._add_newdocs_scalars' not in sys.modules
sys.modules['numpy.core._add_newdocs_scalars'] = object()
import numpy
# change the __name__ of the scalar types
for name in [
'byte', 'short', 'intc', 'int_', 'longlong',
'ubyte', 'ushort', 'uintc', 'uint', 'ulonglong',
'half', 'single', 'double', 'longdouble',
'half', 'csingle', 'cdouble', 'clongdouble',
]:
typ = getattr(numpy, name)
c_typ = PyTypeObject.from_address(id(typ))
c_typ.tp_name = _name_cache[typ] = b"numpy." + name.encode('utf8')
# now generate the docstrings as usual
del sys.modules['numpy.core._add_newdocs_scalars']
import numpy.core._add_newdocs_scalars
replace_scalar_type_names()
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = [
'sphinx.ext.autodoc',
'numpydoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.autosummary',
'sphinx.ext.graphviz',
'sphinx.ext.ifconfig',
'matplotlib.sphinxext.plot_directive',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'sphinx.ext.imgmath',
]
imgmath_image_format = 'svg'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
master_doc = 'contents'
# General substitutions.
project = 'NumPy'
copyright = '2008-2021, The SciPy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print("%s %s" % (version, release))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
def setup(app):
# add a config value for `ifconfig` directives
app.add_config_value('python_version_major', str(sys.version_info.major), 'env')
app.add_lexer('NumPyC', NumPyLexer)
# While these objects do have type `module`, the names are aliases for modules
# elsewhere. Sphinx does not support referring to modules by an alias name,
# so we make the alias look like a "real" module for it.
# If we deemed it desirable, we could in future make these real modules, which
# would make `from numpy.char import split` work.
sys.modules['numpy.char'] = numpy.char
sys.modules['numpy.testing.dec'] = numpy.testing.dec
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
html_theme = 'pydata_sphinx_theme'
html_logo = '_static/numpylogo.svg'
html_theme_options = {
"github_url": "https://github.com/numpy/numpy",
"twitter_url": "https://twitter.com/numpy_team",
}
html_additional_pages = {
'index': 'indexcontent.html',
}
html_title = "%s v%s Manual" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
htmlhelp_basename = 'numpy'
if 'sphinx.ext.pngmath' in extensions:
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
plot_html_show_formats = False
plot_html_show_source_link = False
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# XeLaTeX for better support of unicode characters
latex_engine = 'xelatex'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
latex_elements = {
'fontenc': r'\usepackage[LGR,T1]{fontenc}'
}
# Additional stuff for the LaTeX preamble.
latex_elements['preamble'] = r'''
% In the parameters section, place a newline after the Parameters
% header
\usepackage{xcolor}
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% but expdlist old LaTeX package requires fixes:
% 1) remove extra space
\usepackage{etoolbox}
\makeatletter
\patchcmd\@item{{\@breaklabel} }{{\@breaklabel}}{}{}
\makeatother
% 2) fix bug in expdlist's way of breaking the line after long item label
\makeatletter
\def\breaklabel{%
\def\@breaklabel{%
\leavevmode\par
% now a hack because Sphinx inserts \leavevmode after term node
\def\leavevmode{\def\leavevmode{\unhbox\voidb@x}}%
}%
}
\makeatother
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Texinfo output
# -----------------------------------------------------------------------------
texinfo_documents = [
("contents", 'numpy', 'NumPy Documentation', _stdauthor, 'NumPy',
"NumPy: array processing for numbers, strings, records, and objects.",
'Programming',
1),
]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'neps': ('https://numpy.org/neps', None),
'python': ('https://docs.python.org/dev', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org', None),
'imageio': ('https://imageio.readthedocs.io/en/stable', None),
'skimage': ('https://scikit-image.org/docs/stable', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
'scipy-lecture-notes': ('https://scipy-lectures.org', None),
}
# -----------------------------------------------------------------------------
# NumPy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
autosummary_generate = True
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
plot_rcparams = {
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def _get_c_source_file(obj):
if issubclass(obj, numpy.generic):
return r"core/src/multiarray/scalartypes.c.src"
elif obj is numpy.ndarray:
return r"core/src/multiarray/arrayobject.c"
else:
# todo: come up with a better way to generate these
return None
def linkcode_resolve(domain, info):
"""
    Determine the URL corresponding to a Python object.
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except Exception:
return None
# strip decorators, which would resolve to the source of the decorator
# possibly an upstream bug in getsourcefile, bpo-1764286
try:
unwrap = inspect.unwrap
except AttributeError:
pass
else:
obj = unwrap(obj)
fn = None
lineno = None
# Make a poor effort at linking C extension types
if isinstance(obj, type) and obj.__module__ == 'numpy':
fn = _get_c_source_file(obj)
if fn is None:
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except Exception:
lineno = None
fn = relpath(fn, start=dirname(numpy.__file__))
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
if 'dev' in numpy.__version__:
return "https://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
fn, linespec)
else:
return "https://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
from pygments.lexers import CLexer
from pygments.lexer import inherit, bygroups
from pygments.token import Comment
class NumPyLexer(CLexer):
name = 'NUMPYLEXER'
tokens = {
'statements': [
(r'@[a-zA-Z_]*@', Comment.Preproc, 'macro'),
inherit,
],
}
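# Editorial note (not part of the original conf.py): a custom Pygments lexer
# like NumPyLexer is usually made available to Sphinx from a ``setup(app)``
# hook, roughly as sketched below (the exact ``add_lexer`` call takes a lexer
# class or an instance depending on the Sphinx version):
#
#     def setup(app):
#         app.add_lexer('NumPyC', NumPyLexer)
#
# after which ``.. code-block:: NumPyC`` highlights the ``@TYPE@`` template
# macros via the extra 'statements' rule defined above.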
|
bsd-3-clause
|
piyueh/PetIBM
|
examples/ibpm/cylinder2dRe100_GPU/scripts/plotForceCoefficients.py
|
4
|
1522
|
"""
Plot the instantaneous force coefficients.
Compute the time-averaged force coefficients
and the min/max values for the lift coefficient.
"""
import pathlib
import numpy
from matplotlib import pyplot
# Set up root directory.
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
# Get the force coefficients.
filepath = data_dir / 'forces-0.txt'
with open(filepath, 'r') as infile:
t, fx, fy = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
cd, cl = 2 * fx, 2 * fy
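# Editorial note (assumption, not stated in the original script): the factor 2
# converts forces to coefficients, C = F / (0.5 * rho * U^2 * D); with the
# usual non-dimensionalization rho = U = D = 1, this reduces to C = 2 * F.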
# Compute the time-averaged force coefficients.
# Get the min/max values of the lift coefficient.
limits = (100.0, 200.0)
mask = numpy.where(numpy.logical_and(t >= limits[0], t <= limits[1]))
cd_mean, cl_mean = cd[mask].mean(), cl[mask].mean()
cl_min, cl_max = cl[mask].min(), cl[mask].max()
print('<Cd> = {:0.4f}'.format(cd_mean))
print('<Cl> = {:0.4f} ([{:0.4f}, {:0.4f}])'.format(cl_mean, cl_min, cl_max))
pyplot.rc('font', family='serif', size=16)
# Plot the figure.
fig, ax = pyplot.subplots(nrows=2, figsize=(10.0, 6.0), sharex=True)
ax[0].grid()
ax[0].set_ylabel('Drag coefficient')
ax[0].plot(t, cd)
ax[0].set_ylim(1.0, 1.5)
ax[1].grid()
ax[1].set_xlabel('Non-dimensional time')
ax[1].set_ylabel('Lift coefficient')
ax[1].plot(t, cl)
ax[1].set_xlim(0.0, 200.0)
ax[1].set_ylim(-0.4, 0.4)
fig.tight_layout()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'forceCoefficients.png'
fig.savefig(str(filepath), dpi=300)
|
bsd-3-clause
|
MatthieuBizien/scikit-learn
|
examples/gaussian_process/plot_gpc_iris.py
|
81
|
2231
|
"""
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for isotropic
and anisotropic RBF kernels on a two-dimensional version of the iris dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
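# Illustrative addition (not part of the original example): the optimized
# kernels can be inspected after fitting, e.g.
#     print(gpc_rbf_isotropic.kernel_)
#     print(gpc_rbf_anisotropic.kernel_)
# The anisotropic kernel typically learns two different length-scales, one per
# feature, which is what gives it the higher log-marginal-likelihood.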
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
    # each point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
jay-johnson/sci-pype
|
bins/ml/predictors/predict-from-cache-iris-regressor.py
|
1
|
18957
|
#!/usr/bin/env python
# Load common imports and system envs to build the core object
import sys, os
# For running inside the docker container use:
#import matplotlib
#matplotlib.use('Agg')
# Load the Environment:
os.environ["ENV_DEPLOYMENT_TYPE"] = "JustRedis"
from src.common.inits_for_python import *
#####################################################################
#
# Start Arg Processing:
#
action = "ML Regressor"
parser = argparse.ArgumentParser(description="Parser for Action: " + str(action))
parser.add_argument('-f', '--csvfile', help='CSV File', dest='csvfile')
parser.add_argument('-n', '--dsname', help='Dataset Name', dest='ds_name')
parser.add_argument('-b', '--s3bucket', help='S3 Bucket (Optional)', dest='s_bucket')
parser.add_argument('-k', '--s3key', help='S3 Key (Optional)', dest='s_key')
parser.add_argument('-u', '--usedate', help='Use Date', dest='usedate')
parser.add_argument("-d", "--debug", help="Debug Flag", dest='debug', action='store_true')
args = parser.parse_args()
if args.debug:
debug = True
core.enable_debug()
ds_name = "iris_regressor"
if args.ds_name:
ds_name = str(args.ds_name).strip().lstrip()
now = datetime.datetime.now()
cur_date = now
cur_date_str = now.strftime("%Y-%m-%d")
if args.usedate:
cur_date_str = str(args.usedate)
send_email = "1" # by default send email
s3_bucket = "demodatasets"
s3_key = "dataset_" + str(str(ds_name).upper().strip().lstrip()) + "_" + str(cur_date_str) + ".csv"
analysis_version = 2
if args.s_bucket:
s3_bucket = str(args.s_bucket)
if args.s_key:
s3_key = str(args.s_key)
dataset_filename = "iris.csv"
ml_csv = str(os.getenv("ENV_DATA_SRC_DIR", "/opt/work/data/src")) + "/" + dataset_filename
if args.csvfile:
ml_csv = str(args.csvfile)
#
# End Arg Processing
#
#####################################################################
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
if os.path.exists(ml_csv) == False:
if os.path.exists("/opt/work/examples/datasets/iris.csv"):
org_path = "/opt/work/examples/datasets/iris.csv"
os.system("cp " + str(org_path) + " " + ml_csv)
elif os.path.exists(os.getenv("ENV_PROJ_REPO_DIR", "/opt/work") + "/examples/datasets/iris.csv"):
org_path = os.getenv("ENV_PROJ_REPO_DIR", "/opt/work") + "/examples/datasets/iris.csv"
os.system("cp " + str(org_path) + " " + ml_csv)
else:
lg("Recreating iris dataset: /opt/work/bins/ml/downloaders/download_iris.py", 6)
os.system("/opt/work/bins/ml/downloaders/download_iris.py")
if os.path.exists(ml_csv) == False:
lg("Failed to recreate iris dataset with: /opt/work/bins/ml/downloaders/download_iris.py", 0)
lg("Stopping", 6)
sys.exit(1)
# end of checking if the csv file is available
lg("Processing ML Predictions for CSV(" + str(ml_csv) + ")", 6)
max_features_to_display = 10
num_estimators = 200
show_importance_plot = True
show_confusion_plot = True
random_state = 0
# For forecasting:
units_ahead_set = []
units_ahead = 0
now = datetime.datetime.now()
title_prefix = ds_name
confusion_plot_title = ds_name + " - Random Forest Confusion Matrix\nThe darker the square on the diagonal the better the predictions\n\n"
featimp_plot_title = ds_name + " - Feature Importance with Estimators(" + str(num_estimators) + ")"
row_names = [ "Actual" ] # CM - Y Axis
col_names = [ "Predictions" ] # CM - X Axis
num_jobs = 8
ranked_features = []
org_ranked_features = []
ml_type = "Predict with Filter"
ml_algo_name = "xgb-regressor"
price_min = 0.10
train_test_ratio = 0.1
# What column has the labeled targets as integers? (added-manually to the dataset)
target_column_name = "ResultLabel"
# possible values in the Target Column
target_column_values = [ "Iris-setosa", "Iris-versicolor", "Iris-virginica" ]
# What columns can the algorithms use for training and learning?
feature_column_names = [ "SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "ResultTargetValue" ]
# What column holds string labels for the Target Column?
label_column_name = "ResultLabel"
ignore_features = [ # Prune non-int/float columns as needed:
target_column_name,
label_column_name
]
algo_nodes = []
forcast_df = None
ml_request = {
"MLType" : ml_type,
"MLAlgo" : {
"Name" : ml_algo_name,
"Version" : 1,
"Meta" : {
"UnitsAhead" : units_ahead,
"DatasetName" : ds_name,
"FilterMask" : None,
"Source" : {
"CSVFile" : ml_csv,
"S3File" : "", # <Bucket Name>:<Key>
"RedisKey" : "" # <App Name>:<Key>
},
},
"Steps" : {
"Train" :{
"LearningRate" : 0.1,
"NumEstimators" : 1000,
"Objective" : "reg:linear",
"MaxDepth" : 6,
"MaxDeltaStep" : 0,
"MinChildWeight" : 1,
"Gamma" : 0,
"SubSample" : 0.8,
"ColSampleByTree" : 0.8,
"ColSampleByLevel" : 1.0,
"RegAlpha" : 0,
"RegLambda" : 1,
"BaseScore" : 0.5,
"NumThreads" : -1, # infinite = -1
"ScaledPositionWeight" : 1,
"Seed" : 27,
"Debug" : True
}
},
"Cache" : {
"RLoc" : "CACHE:_MODELS_" + str(ds_name) + "_LATEST",
"UseCaches" : True
}
},
"FeatureColumnNames": feature_column_names,
"TargetColumnName" : target_column_name,
"TargetColumnValues": target_column_values,
"IgnoreFeatures" : ignore_features,
"UnitsAheadSet" : units_ahead_set,
"UnitsAheadType" : "",
"PredictionType" : "Predict",
"MaxFeatures" : 10,
"Version" : 1,
"TrackingType" : "UseTargetColAndUnits",
"TrackingName" : core.to_upper(ds_name),
"TrackingID" : "ML_" + ds_name + "_" + str(core.build_unique_key()),
"Debug" : False
}
# Load the dataset used to build the models
csv_res = core.ml_load_csv_dataset(ml_request, core.get_rds(), core.get_dbs(), debug)
if csv_res["Status"] != "SUCCESS":
lg("ERROR: Failed to Load CSV(" + str(ml_request["MLAlgo"]["Meta"]["Source"]["CSVFile"]) + ")", 0)
sys.exit(1)
ds_df = csv_res["Record"]["SourceDF"]
# Build a Filter for pruning bad records out before creating the train/test sets
samples_filter_mask = (ds_df["SepalLength"] > 0.0) \
& (ds_df["PetalWidth"] > 0.0)
# For patching on the fly you can use the encoder method to replace labels with target dictionary values:
#ready_df = core.ml_encode_target_column(ds_df, "ResultLabel", "Target")
show_pair_plot = False
if show_pair_plot:
lg("Samples(" + str(len(ds_df.index)) + ") in CSV(" + str(ml_request["MLAlgo"]["Meta"]["Source"]["CSVFile"]) + ")", 6)
lg("")
print ds_df.describe()
lg("")
num_per_class = ds_df.groupby("ResultLabel").size()
print num_per_class
lg("")
pair_plot_req = {
"Title" : "Iris Dataset PairPlot",
"SourceDF" : ds_df[samples_filter_mask],
"Style" : "default",
"DiagKind" : "hist", # "kde" or "hist"
"HueColumnName" : ml_request["TargetColumnName"],
"XLabel" : "",
"YLabel" : "",
"CompareColumns": ml_request["FeatureColumnNames"],
"Size" : 3.0,
"ImgFile" : str(os.getenv("ENV_DATA_SRC_DIR", "/opt/work/data/src")) + "/" + "validate_jupyter_iris_classification_pairplot.png",
"ShowPlot" : True
}
core.sb_pair_plot(pair_plot_req)
if os.path.exists(pair_plot_req["ImgFile"]):
lg("Done Plotting Valiation Pair Plot - Saved(" + str(pair_plot_req["ImgFile"]) + ")", 5)
else:
lg("Failed to save Validation Pair Plot(" + str(pair_plot_req["ImgFile"]) + "). Please check the ENV_DATA_SRC_DIR is writeable by this user and exposed to the docker container correctly.", 0)
# end of showing a pairplot for validation
# Create a Prediction Column
ml_request["MLAlgo"]["Meta"]["SamplesFilterMask"] = samples_filter_mask
# Create a Result Column
core.enable_debug()
ml_images = []
train_results = core.ml_train_models_for_predictions(ml_request, core.get_rds(), core.get_dbs(), debug)
if train_results["Status"] != "SUCCESS":
lg("ERROR: Failed to Train Models for Predictions with Error(" + str(train_results["Error"]) + ") StoppedEarly(" + str(train_results["Record"]["StoppedEarly"]) + ")", 0)
sys.exit(1)
algo_nodes = train_results["Record"]["AlgoNodes"]
predict_row = {
"SepalLength" : 5.4,
"SepalWidth" : 3.4,
"PetalLength" : 1.7,
"PetalWidth" : 0.2,
"ResultTargetValue" : 0
}
predict_row_df = pd.DataFrame(predict_row, index=[0])
predict_req = {
"AlgoNodes" : algo_nodes,
"PredictionMask": samples_filter_mask,
"PredictionRow" : predict_row_df
}
predict_results = core.ml_compile_predictions_from_models(predict_req, core.get_rds(), core.get_dbs(), debug)
if predict_results["Status"] != "SUCCESS":
lg("ERROR: Failed to Compile Predictions from Models with Error(" + str(predict_results["Error"]) + ")", 0)
sys.exit(1)
lg("Done with Predictions", 6)
if predict_results["Status"] == "SUCCESS":
al_req = train_results["Record"]
al_req["DSName"] = ml_request["TrackingName"]
al_req["Version"] = 1
al_req["FeatureColumnNames"]= ml_request["FeatureColumnNames"]
al_req["TargetColumnName"] = ml_request["TargetColumnName"]
al_req["TargetColumnValues"]= ml_request["TargetColumnValues"]
al_req["IgnoreFeatures"] = ml_request["IgnoreFeatures"]
al_req["PredictionType"] = ml_request["PredictionType"]
al_req["ConfMatrices"] = predict_results["Record"]["ConfMatrices"]
al_req["PredictionMarkers"] = predict_results["Record"]["PredictionMarkers"]
analysis_dataset = core.ml_compile_analysis_dataset(al_req, core.get_rds(), core.get_dbs(), debug)
lg("Analyzed Models(" + str(len(analysis_dataset["Models"])) + ")", 6)
lg("-----------------------------------------------------", 6)
lg("Caching Models", 6)
cache_req = {
"Name" : "CACHE",
"Key" : "_MODELS_" + str(al_req["Tracking"]["TrackingName"]) + "_LATEST",
"TrackingID": "_MD_" + str(al_req["Tracking"]["TrackingName"]),
"Analysis" : analysis_dataset
}
cache_results = core.ml_cache_analysis_and_models(cache_req, core.get_rds(), core.get_dbs(), debug)
lg("Done Caching Models", 6)
lg("-----------------------------------------------------", 6)
lg("Creating Analysis Visualizations", 6)
# Turn this on to show the images:
analysis_dataset["ShowPlot"] = True
analysis_dataset["SourceDF"] = al_req["SourceDF"]
lg("Plotting Feature Importance", 6)
for midx,model_node in enumerate(analysis_dataset["Models"]):
predict_col = model_node["Target"]
if predict_col == "ResultTargetValue":
plot_req = {
"ImgFile" : analysis_dataset["FeatImpImgFile"],
"Model" : model_node["Model"],
"XLabel" : str(predict_col),
"YLabel" : "Importance Amount",
"Title" : str(predict_col) + " Importance Analysis",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_model_feature_importance(plot_req, debug)
for img in image_list:
ml_images.append(img)
# end of for all models
lg("Plotting PairPlots", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Pair Plot",
"ImgFile" : str(analysis_dataset["PairPlotImgFile"]),
"SourceDF" : al_req["SourceDF"],
"HueColumnName" : target_column_name,
"CompareColumns": feature_column_names,
"Markers" : ["o", "s", "D"],
"Width" : 15.0,
"Height" : 15.0,
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_pairplot(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting Confusion Matrices", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Confusion Matrix",
"ImgFile" : str(analysis_dataset["CMatrixImgFile"]),
"SourceDF" : al_req["SourceDF"],
"ConfMatrices" : al_req["ConfMatrices"],
"Width" : 15.0,
"Height" : 15.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_confusion_matrix(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting Scatters", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Scatter Plot",
"ImgFile" : str(analysis_dataset["ScatterImgFile"]),
"SourceDF" : analysis_dataset["SourceDF"],
"UnitsAheadType" : analysis_dataset["UnitsAheadType"],
"FeatureColumnNames": analysis_dataset["FeatureColumnNames"],
"Hue" : label_column_name,
"Width" : 7.0,
"Height" : 7.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_all_scatterplots(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Plotting JointPlots", 6)
plot_req = {
"DSName" : str(analysis_dataset["DSName"]),
"Title" : str(analysis_dataset["DSName"]) + " - Joint Plot",
"ImgFile" : str(analysis_dataset["JointPlotImgFile"]),
"SourceDF" : analysis_dataset["SourceDF"],
"UnitsAheadType" : analysis_dataset["UnitsAheadType"],
"FeatureColumnNames": analysis_dataset["FeatureColumnNames"],
"Hue" : label_column_name,
"Width" : 15.0,
"Height" : 15.0,
"XLabel" : "Dates",
"YLabel" : "Values",
"ShowPlot" : analysis_dataset["ShowPlot"]
}
image_list = core.sb_all_jointplots(plot_req, debug)
for img in image_list:
ml_images.append(img)
lg("Done Creating Analysis Visualizations", 6)
lg("-----------------------------------------------------", 6)
else:
lg("", 6)
lg("ERROR: Failed Processing Predictions for Dataset(" + str(ds_name) + ") with Error:", 6)
    lg(predict_results["Error"], 6)
lg("", 6)
sys.exit(2)
# end of if success
lg("", 6)
lg("Analysis Complete Saved Images(" + str(len(ml_images)) + ")", 5)
lg("", 6)
sys.exit(0)
|
apache-2.0
|
MicrosoftResearch/Azimuth
|
azimuth/load_data.py
|
2
|
22313
|
import pandas
import util
import matplotlib.pyplot as plt
import scipy as sp
import scipy.stats
import numpy as np
import os
cur_dir = os.path.dirname(os.path.abspath(__file__))
def from_custom_file(data_file, learn_options):
# use semantics of when we load V2 data
print "Loading inputs to predict from %s" % data_file
data = pandas.read_csv(data_file)
mandatory_columns = ['30mer', 'Target gene', 'Percent Peptide', 'Amino Acid Cut position']
for col in mandatory_columns:
assert col in data.columns, "inputs for prediction must include these columns: %s" % mandatory_columns
Xdf = pandas.DataFrame(data)
Xdf['30mercopy'] = Xdf['30mer']
Xdf = Xdf.set_index(['30mer', 'Target gene'])
Xdf['30mer'] = Xdf['30mercopy']
Xdf.index.names = ['Sequence', 'Target']
Xdf['drug']= ['dummydrug%s' % i for i in range(Xdf.shape[0])]
Xdf = Xdf.set_index('drug', append=True)
Y = None
gene_position = Xdf[['Percent Peptide', 'Amino Acid Cut position']]
target_genes = np.unique(Xdf.index.levels[1])
learn_options = set_V2_target_names(learn_options)
return Xdf, Y, gene_position, target_genes
def from_file(data_file, learn_options, data_file2=None, data_file3=None):
if learn_options["V"] == 1: # from Nature Biotech paper
print "loading V%d data" % learn_options["V"]
        assert learn_options["weighted"] is None, "not supported for V1 data"
annotations, gene_position, target_genes, Xdf, Y = read_V1_data(data_file, learn_options)
learn_options['binary target name'] = 'average threshold'
learn_options['rank-transformed target name'] = 'average rank'
learn_options['raw target name'] = 'average activity'
# NF: not sure why the line below was uncommented
# gene_position, selected_ind, target_genes, Xdf, Y = extract_by_organism("mouse", Xdf, Y, gene_position)
elif learn_options["V"] == 2: # from Nov 2014, hot off the machines
Xdf, drugs_to_genes, target_genes, Y, gene_position = read_V2_data(data_file, learn_options)
# check that data is consistent with sgRNA score
xx = Xdf['sgRNA Score'].values
yy = Y['score_drug_gene_rank'].values
rr,pp = sp.stats.pearsonr(xx, yy)
assert rr > 0, "data processing has gone wrong as correlation with previous predictions is negative"
learn_options = set_V2_target_names(learn_options)
elif learn_options["V"] == 3: # merge of V1 and V2--this is what is used for the final model
# these are relative to the V2 data, and V1 will be made to automatically match
learn_options['binary target name'] = 'score_drug_gene_threshold'
learn_options['rank-transformed target name'] = 'score_drug_gene_rank'
learn_options['raw target name'] = None
Xdf, Y, gene_position, target_genes = mergeV1_V2(data_file, data_file2, learn_options)
elif learn_options["V"] == 4: # merge of V1 and V2 and the Xu et al data
# these are relative to the V2 data, and V1 and Xu et al. will be made to automatically match
learn_options['binary target name'] = 'score_drug_gene_threshold'
learn_options['rank-transformed target name'] = 'score_drug_gene_rank'
learn_options['raw target name'] = None
Xdf, Y, gene_position, target_genes = merge_all(data_file, data_file2, data_file3, learn_options)
elif learn_options['V'] == 5:
learn_options['binary target name'] = 'score_drug_gene_threshold'
learn_options['rank-transformed target name'] = 'score_drug_gene_rank'
learn_options['raw target name'] = None
gene_position, target_genes, Xdf, Y = read_xu_et_al(data_file3)
# truncate down to 30--some data sets gave us more.
Xdf["30mer"] = Xdf["30mer"].apply(lambda x: x[0:30])
return Xdf, Y, gene_position, target_genes
def set_V2_target_names(learn_options):
if 'binary target name' not in learn_options.keys():
learn_options['binary target name'] = 'score_drug_gene_threshold'
if 'rank-transformed target name' not in learn_options.keys():
learn_options['rank-transformed target name'] = 'score_drug_gene_rank'
learn_options['raw target name'] = 'score'
return learn_options
def combine_organisms(human_data, mouse_data):
# 'Target' is the column name, 'CD13' are some rows in that column
# xs slices through the pandas data frame to return another one
cd13 = human_data.xs('CD13', level='Target', drop_level=False)
# y_names are column names, cd13 is a pandas object
X_CD13, Y_CD13 = util.get_data(cd13, y_names=['NB4 CD13', 'TF1 CD13'])
cd33 = human_data.xs('CD33', level='Target', drop_level=False)
X_CD33, Y_CD33 = util.get_data(cd33, y_names=['MOLM13 CD33', 'TF1 CD33', 'NB4 CD33'])
cd15 = human_data.xs('CD15', level='Target', drop_level=False)
X_CD15, Y_CD15 = util.get_data(cd15, y_names=['MOLM13 CD15'])
mouse_X = pandas.DataFrame()
mouse_Y = pandas.DataFrame()
for k in mouse_data.index.levels[1]:
# is k the gene
X, Y = util.get_data(mouse_data.xs(k, level='Target', drop_level=False), ["On-target Gene"], target_gene=k, organism='mouse')
mouse_X = pandas.concat([mouse_X, X], axis=0)
mouse_Y = pandas.concat([mouse_Y, Y], axis=0)
X = pandas.concat([X_CD13, X_CD15, X_CD33, mouse_X], axis=0)
Y = pandas.concat([Y_CD13, Y_CD15, Y_CD33, mouse_Y], axis=0)
return X, Y
def read_V1_data(data_file, learn_options, AML_file=cur_dir + "/data/V1_suppl_data.txt"):
if data_file is None:
data_file = cur_dir + "/data/V1_data.xlsx"
human_data = pandas.read_excel(data_file, sheetname=0, index_col=[0, 1])
mouse_data = pandas.read_excel(data_file, sheetname=1, index_col=[0, 1])
Xdf, Y = combine_organisms(human_data, mouse_data)
# get position within each gene, then join and re-order
    # note that we were told to ignore 11 missing guides
annotations = pandas.read_csv(AML_file, delimiter='\t', index_col=[0, 4])
annotations.index.names = Xdf.index.names
gene_position = pandas.merge(Xdf, annotations, how="inner", left_index=True, right_index=True)
gene_position = util.impute_gene_position(gene_position)
gene_position = gene_position[['Amino Acid Cut position', 'Nucleotide cut position', 'Percent Peptide']]
Y = Y.loc[gene_position.index]
Xdf = Xdf.loc[gene_position.index]
    Y['test'] = 1  # for bookkeeping, to keep consistent with V2 which uses this for "extra pairs"
target_genes = Y['Target gene'].unique()
Y.index.names = ['Sequence', 'Target gene']
assert Xdf.index.equals(Y.index), "The index of Xdf is different from the index of Y (this can cause inconsistencies/random performance later on)"
if learn_options is not None and learn_options["flipV1target"]:
print "************************************************************************"
print "*****************MATCHING DOENCH CODE (DEBUG MODE)**********************"
print "************************************************************************"
# normally it is: Y['average threshold'] = Y['average rank'] > 0.8, where 1s are good guides, 0s are not
Y['average threshold'] = Y['average rank'] < 0.2 # 1s are bad guides
print "press c to continue"
import ipdb
ipdb.set_trace()
return annotations, gene_position, target_genes, Xdf, Y
def rank_transform(x):
return 1.0 - sp.stats.mstats.rankdata(x)/sp.stats.mstats.rankdata(x).max()
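# Editorial example (not in the original source): rank_transform maps the
# largest raw value to 0 and smaller values toward 1, e.g.
#     rank_transform(np.array([10., 20., 30.])) -> array([0.667, 0.333, 0.])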
def read_xu_et_al(data_file, learn_options=None, verbose=True, subsetting='ours'):
if data_file is None:
data_file = '../data/xu_et_al_data.xlsx'
datasets = ['ribo', 'non_ribo', 'mESC']
aggregated = None
for d in datasets:
data_efficient = pandas.read_excel(data_file, sheetname='%s_efficient_sgRNA' % d, skiprows=2)
data_inefficient = pandas.read_excel(data_file, sheetname='%s_inefficient_sgRNA' % d, skiprows=2)
data_efficient['threshold'] = 1.
data_inefficient['threshold'] = 0.
exp_data = pandas.concat((data_efficient, data_inefficient))
exp_data['rank_KBM7'] = exp_data.groupby('Gene Symbol')['log2 fold change, KBM7'].transform(rank_transform)
exp_data['rank_HL60'] = exp_data.groupby('Gene Symbol')['log2 fold change, HL60'].transform(rank_transform)
if aggregated is None:
aggregated = exp_data
else:
aggregated = pandas.concat((aggregated, exp_data))
# go from 40mer to 30mer
if subsetting == 'ours':
aggregated["sequence(target+3'+5')"] = aggregated["sequence(target+3'+5')"].apply(lambda x: x[6:-4])
else:
aggregated["sequence(target+3'+5')"] = aggregated["sequence(target+3'+5')"].apply(lambda x: x[10:])
    # make sure EVERYTHING is uppercase
aggregated["sequence(target+3'+5')"] = aggregated["sequence(target+3'+5')"].apply(lambda x: x.upper())
# rename columns
aggregated.rename(columns={"sequence(target+3'+5')": '30mer', 'Gene Symbol': 'Target gene', 'strand':'Strand'}, inplace=True)
aggregated['Strand'].loc[aggregated['Strand']=='+'] = 'sense'
aggregated['Strand'].loc[aggregated['Strand']=='-'] = 'antisense'
aggregated['average rank'] = aggregated[['rank_HL60', 'rank_KBM7']].mean(axis=1)
df = aggregated
df = df.rename(columns={'30mer': 'Sequence', 'Target gene': 'Target'})
df['drug'] = 'nodrug'
df['test'] = 1
df = df.set_index(['Sequence', 'Target', 'drug'])
df['30mer'] = df.index.get_level_values(0)
df['Target gene'] = df.index.get_level_values(1)
df['Organism'] = 'unknown'
df['score_drug_gene_rank'] = df['average rank']
df['score_drug_gene_threshold'] = df['threshold']
df['Nucleotide cut position'] = df['start of target']
df['Percent Peptide'] = 0
df['Amino Acid Cut position'] = 0
target_genes = np.unique(df['Target gene'].values)
return df[['Nucleotide cut position', 'Percent Peptide', 'Amino Acid Cut position']], target_genes, df[['30mer', 'Strand']], df[['score_drug_gene_rank', 'score_drug_gene_threshold', 'test', 'Target gene']]
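# Editorial note: the tuple returned above corresponds to
# (gene_position, target_genes, Xdf, Y), matching how from_file() and
# merge_all() unpack the result of read_xu_et_al().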
def read_V2_data(data_file, learn_options=None, verbose=True):
if data_file is None:
data_file = cur_dir + "/data/V2_data.xlsx"
# to compare
# import predict as pr; a1, g1, t1, X1, Y1 = pr.data_setup()
# a1.index.names
data = pandas.read_excel(data_file, sheetname="ResultsFiltered", skiprows=range(0, 6+1), index_col=[0, 4])
    # grab data relevant to each of the three drugs, which excludes some genes
# note gene MED12 has two drugs, all others have at most one
Xdf = pandas.DataFrame()
# This comes from the "Pairs" tab in their excel sheet,
    # note that HPRT/HPRT1 are the same thing, and also PLX_2uM/PLcX_2uM
known_pairs = {'AZD_200nM': ['CCDC101', 'MED12', 'TADA2B', 'TADA1'],
'6TG_2ug/mL': ['HPRT1'],
'PLX_2uM': ['CUL3', 'NF1', 'NF2', 'MED12']}
drugs_to_genes = {'AZD_200nM': ['CCDC101', 'MED12', 'TADA2B', 'TADA1'],
'6TG_2ug/mL': ['HPRT1'],
'PLX_2uM': ['CUL3', 'NF1', 'NF2', 'MED12']}
if learn_options is not None:
assert not (learn_options['extra pairs'] and learn_options['all pairs']), "extra pairs and all pairs options (in learn_options) can't be active simultaneously."
if learn_options['extra pairs']:
drugs_to_genes['AZD_200nM'].extend(['CUL3', 'NF1', 'NF2'])
elif learn_options['all pairs']:
drugs_to_genes['AZD_200nM'].extend(['HPRT1', 'CUL3', 'NF1', 'NF2'])
drugs_to_genes['PLX_2uM'].extend(['HPRT1', 'CCDC101', 'TADA2B', 'TADA1'])
drugs_to_genes['6TG_2ug/mL'].extend(['CCDC101', 'MED12', 'TADA2B', 'TADA1', 'CUL3', 'NF1', 'NF2'])
count = 0
for drug in drugs_to_genes.keys():
genes = drugs_to_genes[drug]
for g in genes:
Xtmp = data.copy().xs(g, level='Target gene', drop_level=False)
Xtmp['drug'] = drug
Xtmp['score'] = Xtmp[drug].copy() # grab the drug results that are relevant for this gene
if g in known_pairs[drug]:
Xtmp['test'] = 1.
else:
Xtmp['test'] = 0.
count = count + Xtmp.shape[0]
Xdf = pandas.concat([Xdf, Xtmp], axis=0)
if verbose:
print "Loaded %d samples for gene %s \ttotal number of samples: %d" % (Xtmp.shape[0], g, count)
# create new index that includes the drug
Xdf = Xdf.set_index('drug', append=True)
Y = pandas.DataFrame(Xdf.pop("score"))
Y.columns.names = ["score"]
test_gene = pandas.DataFrame(Xdf.pop('test'))
target = pandas.DataFrame(Xdf.index.get_level_values('Target gene').values, index=Y.index, columns=["Target gene"])
Y = pandas.concat((Y, target, test_gene), axis=1)
target_genes = Y['Target gene'].unique()
gene_position = Xdf[["Percent Peptide", "Amino Acid Cut position"]].copy()
    # convert to ranks for each (gene, drug) combo
# flip = True
y_rank = pandas.DataFrame()
y_threshold = pandas.DataFrame()
y_quant = pandas.DataFrame()
for drug in drugs_to_genes.keys():
gene_list = drugs_to_genes[drug]
for gene in gene_list:
ytmp = pandas.DataFrame(Y.xs((gene, drug), level=["Target gene", "drug"], drop_level=False)['score'])
y_ranktmp, y_rank_raw, y_thresholdtmp, y_quanttmp = util.get_ranks(ytmp, thresh=0.8, prefix="score_drug_gene", flip=False)
# np.unique(y_rank.values-y_rank_raw.values)
y_rank = pandas.concat((y_rank, y_ranktmp), axis=0)
y_threshold = pandas.concat((y_threshold, y_thresholdtmp), axis=0)
y_quant = pandas.concat((y_quant, y_quanttmp), axis=0)
yall = pandas.concat((y_rank, y_threshold, y_quant), axis=1)
Y = pandas.merge(Y, yall, how='inner', left_index=True, right_index=True)
# convert also by drug only, irrespective of gene
y_rank = pandas.DataFrame()
y_threshold = pandas.DataFrame()
y_quant = pandas.DataFrame()
for drug in drugs_to_genes.keys():
ytmp = pandas.DataFrame(Y.xs(drug, level="drug", drop_level=False)['score'])
y_ranktmp, y_rank_raw, y_thresholdtmp, y_quanttmp = util.get_ranks(ytmp, thresh=0.8, prefix="score_drug", flip=False)
# np.unique(y_rank.values-y_rank_raw.values)
y_rank = pandas.concat((y_rank, y_ranktmp), axis=0)
y_threshold = pandas.concat((y_threshold, y_thresholdtmp), axis=0)
y_quant = pandas.concat((y_quant, y_quanttmp), axis=0)
yall = pandas.concat((y_rank, y_threshold, y_quant), axis=1)
Y = pandas.merge(Y, yall, how='inner', left_index=True, right_index=True)
PLOT = False
if PLOT:
# to better understand, try plotting something like:
labels = ["score", "score_drug_gene_rank", "score_drug_rank", "score_drug_gene_threshold", "score_drug_threshold"]
for label in labels:
plt.figure()
plt.plot(Xdf['sgRNA Score'].values, Y[label].values, '.')
r, pearp = sp.stats.pearsonr(Xdf['sgRNA Score'].values.flatten(), Y[label].values.flatten())
plt.title(label + ' VS pred. score, $r$=%0.2f (p=%0.2e)' % (r, pearp))
plt.xlabel("sgRNA prediction score")
plt.ylabel(label)
gene_position = util.impute_gene_position(gene_position)
if learn_options is not None and learn_options["weighted"] == "variance":
print "computing weights from replicate variance..."
        # compute the variance across replicates so we can use it as a weight
data = pandas.read_excel(data_file, sheetname="Normalized", skiprows=range(0, 6+1), index_col=[0, 4])
data.index.names = ["Sequence", "Target gene"]
experiments = {}
experiments['AZD_200nM'] = ['Deep 25', 'Deep 27', 'Deep 29 ', 'Deep 31']
experiments['6TG_2ug/mL'] = ['Deep 33', 'Deep 35', 'Deep 37', 'Deep 39']
experiments['PLX_2uM'] = ['Deep 49', 'Deep 51', 'Deep 53', 'Deep 55']
variance = None
for drug in drugs_to_genes.keys():
data_tmp = data.iloc[data.index.get_level_values('Target gene').isin(drugs_to_genes[drug])][experiments[drug]]
data_tmp["drug"] = drug
data_tmp = data_tmp.set_index('drug', append=True)
data_tmp["variance"] = np.var(data_tmp.values, axis=1)
if variance is None:
variance = data_tmp["variance"].copy()
else:
variance = pandas.concat((variance, data_tmp["variance"]), axis=0)
orig_index = Y.index.copy()
Y = pandas.merge(Y, pandas.DataFrame(variance), how="inner", left_index=True, right_index=True)
Y = Y.ix[orig_index]
print "done."
# Make sure to keep this check last in this function
assert Xdf.index.equals(Y.index), "The index of Xdf is different from the index of Y (this can cause inconsistencies/random performance later on)"
return Xdf, drugs_to_genes, target_genes, Y, gene_position
def merge_all(data_file=None, data_file2=None, data_file3=None, learn_options=None):
Xdf, Y, gene_position, target_genes = mergeV1_V2(data_file, data_file2, learn_options)
gene_position_xu, target_genes_xu, Xdf_xu, Y_xu = read_xu_et_al(data_file3, learn_options)
Xdf = pandas.concat((Xdf, Xdf_xu))
Y = pandas.concat((Y, Y_xu))
gene_position = pandas.concat((gene_position, gene_position_xu))
target_genes = np.concatenate((target_genes, target_genes_xu))
return Xdf, Y, gene_position, target_genes
def mergeV1_V2(data_file, data_file2, learn_options):
'''
ground_truth_label, etc. are taken to correspond to the V2 data, and then the V1 is appropriately matched
based on semantics
'''
assert not learn_options['include_strand'], "don't currently have 'Strand' column in V1 data"
annotations, gene_position1, target_genes1, Xdf1, Y1 = read_V1_data(data_file, learn_options)
Xdf2, drugs_to_genes, target_genes2, Y2, gene_position2 = read_V2_data(data_file2)
Y1.rename(columns={'average rank': learn_options["rank-transformed target name"]}, inplace=True)
Y1.rename(columns={'average threshold': learn_options["binary target name"]}, inplace=True)
    # rename columns, and add a dummy "drug" to V1 so we can join the data sets
Y1["drug"] = ["nodrug" for x in range(Y1.shape[0])]
Y1 = Y1.set_index('drug', append=True)
Y1.index.names = ['Sequence', 'Target gene', 'drug']
Y_cols_to_keep = np.unique(['Target gene', 'test', 'score_drug_gene_rank', 'score_drug_gene_threshold'])
Y1 = Y1[Y_cols_to_keep]
Y2 = Y2[Y_cols_to_keep]
Xdf1["drug"] = ["nodrug" for x in range(Xdf1.shape[0])]
Xdf1 = Xdf1.set_index('drug', append=True)
X_cols_to_keep = ['30mer', 'Strand']
Xdf1 = Xdf1[X_cols_to_keep]
Xdf2 = Xdf2[X_cols_to_keep]
gene_position1["drug"] = ["nodrug" for x in range(gene_position1.shape[0])]
gene_position1 = gene_position1.set_index('drug', append=True)
gene_position1.index.names = ['Sequence', 'Target gene', 'drug']
cols_to_keep = [u'Percent Peptide', u'Amino Acid Cut position']
gene_position1 = gene_position1[cols_to_keep]
gene_position2 = gene_position2[cols_to_keep]
Y = pandas.concat((Y1, Y2), axis=0)
Xdf = pandas.concat((Xdf1, Xdf2), axis=0)
gene_position = pandas.concat((gene_position1, gene_position2))
# target_genes = target_genes1 + target_genes2
target_genes = np.concatenate((target_genes1, target_genes2))
save_to_file = False
if save_to_file:
Y.index.names = ['Sequence', 'Target', 'drug']
assert np.all(Xdf.index.values==Y.index.values), "rows don't match up"
onedupind = np.where(Y.index.duplicated())[0][0]
alldupind = np.where(Y.index.get_level_values(0).values==Y.index[onedupind][0])[0]
#arbitrarily set one of these to have "nodrug2" as the third level index
        #so that they are not repeated, and the joins therefore do not augment the data set
assert len(alldupind)==2, "expected only duplicates"
newindex = Y.index.tolist()
newindex[onedupind] = (newindex[onedupind][0], newindex[onedupind][1], "nodrug2")
Y.index = pandas.MultiIndex.from_tuples(newindex, names = Y.index.names)
Xdf.index = pandas.MultiIndex.from_tuples(newindex, names = Y.index.names)
# there seems to be a duplicate index, and thus this increases the data set size, so doing it the hacky way...
XandY = pandas.merge(Xdf, Y, how="inner", left_index=True, right_index=True)
gene_position_tmp = gene_position.copy()
gene_position_tmp.index.names = ['Sequence', 'Target', 'drug']
gene_position_tmp.index = pandas.MultiIndex.from_tuples(newindex, names = Y.index.names)
XandY = pandas.merge(XandY, gene_position_tmp, how="inner", left_index=True, right_index=True)
# truncate to 30mers
XandY["30mer"] = XandY["30mer"].apply(lambda x: x[0:30])
XandY.to_csv(r'D:\Source\CRISPR\data\tmp\V3.csv')
return Xdf, Y, gene_position, target_genes
def get_V1_genes(data_file=None):
annotations, gene_position, target_genes, Xdf, Y = read_V1_data(data_file, learn_options=None)
return target_genes
def get_V2_genes(data_file=None):
Xdf, drugs_to_genes, target_genes, Y, gene_position = read_V2_data(data_file, verbose=False)
return target_genes
def get_V3_genes(data_fileV1=None, data_fileV2=None):
target_genes = np.concatenate((get_V1_genes(data_fileV1), get_V2_genes(data_fileV2)))
return target_genes
def get_xu_genes(data_file=None):
return read_xu_et_al(data_file)[1]
def get_mouse_genes(data_file=None):
annotations, gene_position, target_genes, Xdf, Y = read_V1_data(data_file, learn_options=None)
return Xdf[Xdf['Organism'] == 'mouse']['Target gene'].unique()
def get_human_genes(data_file=None):
annotations, gene_position, target_genes, Xdf, Y = read_V1_data(data_file, learn_options=None)
mouse_genes = Xdf[Xdf['Organism'] == 'mouse']['Target gene'].unique()
all_genes = get_V3_genes(None, None) # TODO this needs to support specifying file names (!= 'None')
return np.setdiff1d(all_genes, mouse_genes)
|
bsd-3-clause
|
glemaitre/UnbalancedDataset
|
imblearn/under_sampling/prototype_selection/tests/test_edited_nearest_neighbours.py
|
2
|
4597
|
"""Test the module edited nearest neighbour."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
from __future__ import print_function
import numpy as np
from pytest import raises
from sklearn.utils.testing import assert_array_equal
from sklearn.neighbors import NearestNeighbors
from imblearn.under_sampling import EditedNearestNeighbours
from imblearn.utils.testing import warns
X = np.array([[2.59928271, 0.93323465], [0.25738379, 0.95564169],
[1.42772181, 0.526027], [1.92365863, 0.82718767],
[-0.10903849, -0.12085181], [-0.284881, -0.62730973],
[0.57062627, 1.19528323], [0.03394306, 0.03986753],
[0.78318102, 2.59153329], [0.35831463, 1.33483198],
[-0.14313184, -1.0412815], [0.01936241, 0.17799828],
[-1.25020462, -0.40402054], [-0.09816301, -0.74662486],
[-0.01252787, 0.34102657], [0.52726792, -0.38735648],
[0.2821046, -0.07862747], [0.05230552, 0.09043907],
[0.15198585, 0.12512646], [0.70524765, 0.39816382]])
Y = np.array([1, 2, 1, 1, 0, 2, 2, 2, 2, 2, 2, 0, 1, 2, 2, 2, 2, 1, 2, 1])
def test_enn_init():
enn = EditedNearestNeighbours()
assert enn.n_neighbors == 3
assert enn.kind_sel == 'all'
assert enn.n_jobs == 1
def test_enn_fit_sample():
enn = EditedNearestNeighbours()
X_resampled, y_resampled = enn.fit_sample(X, Y)
X_gt = np.array([[-0.10903849, -0.12085181], [0.01936241, 0.17799828],
[2.59928271, 0.93323465], [1.92365863, 0.82718767],
[0.25738379, 0.95564169], [0.78318102, 2.59153329],
[0.52726792, -0.38735648]])
y_gt = np.array([0, 0, 1, 1, 2, 2, 2])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_enn_fit_sample_with_indices():
enn = EditedNearestNeighbours(return_indices=True)
X_resampled, y_resampled, idx_under = enn.fit_sample(X, Y)
X_gt = np.array([[-0.10903849, -0.12085181], [0.01936241, 0.17799828],
[2.59928271, 0.93323465], [1.92365863, 0.82718767],
[0.25738379, 0.95564169], [0.78318102, 2.59153329],
[0.52726792, -0.38735648]])
y_gt = np.array([0, 0, 1, 1, 2, 2, 2])
idx_gt = np.array([4, 11, 0, 3, 1, 8, 15])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
assert_array_equal(idx_under, idx_gt)
def test_enn_fit_sample_mode():
enn = EditedNearestNeighbours(kind_sel='mode')
X_resampled, y_resampled = enn.fit_sample(X, Y)
X_gt = np.array([[-0.10903849, -0.12085181], [0.01936241, 0.17799828],
[2.59928271, 0.93323465], [1.42772181, 0.526027],
[1.92365863, 0.82718767], [0.25738379, 0.95564169],
[-0.284881, -0.62730973], [0.57062627, 1.19528323],
[0.78318102, 2.59153329], [0.35831463, 1.33483198],
[-0.14313184, -1.0412815], [-0.09816301, -0.74662486],
[0.52726792, -0.38735648], [0.2821046, -0.07862747]])
y_gt = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_enn_fit_sample_with_nn_object():
nn = NearestNeighbors(n_neighbors=4)
enn = EditedNearestNeighbours(
n_neighbors=nn, kind_sel='mode')
X_resampled, y_resampled = enn.fit_sample(X, Y)
X_gt = np.array([[-0.10903849, -0.12085181], [0.01936241, 0.17799828],
[2.59928271, 0.93323465], [1.42772181, 0.526027],
[1.92365863, 0.82718767], [0.25738379, 0.95564169],
[-0.284881, -0.62730973], [0.57062627, 1.19528323],
[0.78318102, 2.59153329], [0.35831463, 1.33483198],
[-0.14313184, -1.0412815], [-0.09816301, -0.74662486],
[0.52726792, -0.38735648], [0.2821046, -0.07862747]])
y_gt = np.array([0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2])
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_enn_not_good_object():
nn = 'rnd'
enn = EditedNearestNeighbours(
n_neighbors=nn, kind_sel='mode')
with raises(ValueError, match="has to be one of"):
enn.fit_sample(X, Y)
def test_deprecation_random_state():
enn = EditedNearestNeighbours(random_state=0)
with warns(DeprecationWarning,
match="'random_state' is deprecated from 0.4"):
enn.fit_sample(X, Y)
|
mit
|
apeyrard/sjtu-work
|
DIP/exercises/ex1/ex1.py
|
1
|
2020
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
import sys
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='Choose how to process input image.')
parser.add_argument('image')
args = parser.parse_args()
def getHist(data, maxPix):
hist = np.zeros(maxPix, dtype=int)
for x in data:
hist[x] += 1
return hist
try:
with Image.open(args.image) as im:
#getting the list of pixel values
data = list(im.getdata())
maxPix = 256
#getting histogram
histList = getHist(data, maxPix)
#plotting histogram
plt.bar(np.arange(maxPix), histList)
plt.ylabel('Nb of pixels')
plt.xlabel('Value')
plt.xlim(0, maxPix)
plt.show()
#total number of pixels
total = sum(histList)
#maps value to new value in [0, 255] range
def transform(value):
tmpSum = 0
for j in range(value):
tmpSum += histList[j]/total
return tmpSum*maxPix
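        # Editorial note (not in the original script): transform() is the
        # histogram-equalization mapping, i.e. the (exclusive) empirical CDF of
        # the pixel values scaled to [0, maxPix]. A vectorized equivalent is
        # roughly:
        #     cum = np.concatenate(([0], np.cumsum(histList)[:-1])) / total
        #     lut = cum * maxPix
        # with new_pixel = round(lut[old_pixel]).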
#getting cumulative distribution function
cdf = np.zeros(maxPix)
for i,val in enumerate(histList):
if i != 0:
cdf[i] = cdf[i-1] + val
#plotting cdf
plt.plot(cdf)
plt.ylabel('Value of new pixel')
plt.xlabel('Value of initial pixel')
plt.xlim(0, maxPix)
plt.show()
#new image
newim = Image.new(im.mode, im.size)
ptr = newim.load()
for i, pixel in enumerate(data):
newValue = round(transform(pixel))
ptr[i%im.size[0], i//im.size[0]] = newValue
newim.show()
data = list(newim.getdata())
histList = getHist(data, maxPix)
plt.bar(np.arange(maxPix), histList)
plt.ylabel('Nb of pixels')
plt.xlabel('Value')
plt.xlim(0, maxPix)
plt.show()
except FileNotFoundError as e:
sys.exit("Error : file not found")
|
mit
|
rlowrance/re-avm
|
rfbound.py
|
1
|
5200
|
'''program to estimate the generalization error from a variety of AVMs
INPUT FILE:
WORKING/samples-train-validate.csv
OUTPUT FILE:
WORKING/rfbound/[test-]HP-YYYYMM-NN.pickle
'''
from __future__ import division
import cPickle as pickle
import numpy as np
import os
import pandas as pd
import pdb
from pprint import pprint
import random
import sklearn
import sklearn.grid_search
import sklearn.metrics
import sys
import AVM
from Bunch import Bunch
from columns_contain import columns_contain
import layout_transactions as transactions
from Logger import Logger
from ParseCommandLine import ParseCommandLine
from Path import Path
# from TimeSeriesCV import TimeSeriesCV
cc = columns_contain
def usage(msg=None):
print __doc__
if msg is not None:
print msg
print 'usage : python rfbound.py HP YYYYMM NN [--test]'
print ' HP {max_depth | max_features}'
print ' YYYYMM year + month; ex: 200402'
print ' NN number of folds to use for the cross validating'
print ' --test run in test mode (on a small sample of the entire data)',
sys.exit(1)
def make_control(argv):
# return a Bunch
print argv
if not(4 <= len(argv) <= 5):
usage('invalid number of arguments')
pcl = ParseCommandLine(argv)
arg = Bunch(
base_name='rfbound',
hp=argv[1],
yyyymm=argv[2],
folds=argv[3],
test=pcl.has_arg('--test'),
)
try:
arg.folds = int(arg.folds)
except:
        usage('NN not an integer; ' + str(arg.folds))
random_seed = 123
random.seed(random_seed)
dir_working = Path().dir_working()
debug = False
out_file_name = (
'%s/%s%s-%s-folds-%02d.pickle' % (
arg.base_name,
('test-' if arg.test else ''),
arg.hp,
arg.yyyymm,
arg.folds)
)
    # ensure the output directory exists
dir_path = dir_working + arg.base_name
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return Bunch(
arg=arg,
debug=debug,
path_in=dir_working + 'samples-train-validate.csv',
path_out=dir_working + out_file_name,
random_seed=random_seed,
test=arg.test,
)
def print_gscv(gscv, tag=None, only_best=False):
pdb.set_trace()
print 'result from GridSearchCV'
if tag is not None:
print 'for', str(tag)
def print_params(params):
for k, v in params.iteritems():
print ' parameter %15s: %s' % (k, v)
def print_grid_score(gs):
print ' mean: %.0f std: %0.f' % (gs.mean_validation_score, np.std(gs.cv_validation_scores))
for cv_vs in gs.cv_validation_scores:
print ' validation score: %0.6f' % cv_vs
print_params(gs.parameters)
if not only_best:
for i, grid_score in enumerate(gscv.grid_scores_):
print 'grid index', i
print_grid_score(grid_score)
print 'best score', gscv.best_score_
print 'best estimator', gscv.best_estimator_
print 'best params'
print_params(gscv.best_params_)
print 'scorer', gscv.scorer_
def do_rfbound(control, samples):
'run grid search on random forest model; return grid search object'
# HP settings to test
# common across --rfbound options
model_name_seq = ('RandomForestRegressor',)
n_months_back_seq = (1, 2, 3, 4, 5, 6)
n_estimators_seq = (10, 30, 100, 300, 1000)
# not common across --rfbound options
max_features_seq = (1, 'log2', 'sqrt', .1, .3, 'auto')
max_depth_seq = (1, 3, 10, 30, 100, 300)
gscv = sklearn.grid_search.GridSearchCV(
estimator=AVM.AVM(),
param_grid=dict(
model_name=model_name_seq,
n_months_back=n_months_back_seq,
forecast_time_period=[int(control.arg.yyyymm)],
n_estimators=n_estimators_seq,
max_depth=max_depth_seq if control.arg.hp == 'max_depth' else [None],
max_features=max_features_seq if control.arg.hp == 'max_features' else [None],
random_state=[control.random_seed],
),
scoring=AVM.avm_scoring,
n_jobs=1 if control.test else -1,
cv=control.arg.folds,
verbose=1 if control.test else 0,
)
gscv.fit(samples)
print 'gscv'
pprint(gscv)
# print_gscv(gscv, tag=control.arg.rfbound, only_best=True)
return gscv
def main(argv):
control = make_control(argv)
if False:
# avoid error in sklearn that requires flush to have no arguments
sys.stdout = Logger(base_name=control.arg.base_name)
print control
samples = pd.read_csv(
control.path_in,
nrows=1000 if control.test else None,
)
print 'samples.shape', samples.shape
result = do_rfbound(control, samples)
with open(control.path_out, 'wb') as f:
pickle.dump((result, control), f)
print control
if control.test:
print 'DISCARD OUTPUT: test'
print 'done'
return
if __name__ == '__main__':
if False:
# avoid pyflakes warnings
pdb.set_trace()
pprint()
pd.DataFrame()
np.array()
print transactions
main(sys.argv)
|
bsd-3-clause
|
kdebrab/pandas
|
pandas/core/reshape/reshape.py
|
1
|
35362
|
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, text_type, zip
from pandas import compat
from functools import partial
import itertools
import numpy as np
from pandas.core.dtypes.common import (
ensure_platform_int,
is_list_like, is_bool_dtype,
needs_i8_conversion, is_sparse, is_object_dtype)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.missing import notna
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.core.sparse.array import SparseArray
from pandas._libs.sparse import IntIndex
from pandas.core.arrays import Categorical
from pandas.core.arrays.categorical import _factorize_from_iterable
from pandas.core.sorting import (get_group_index, get_compressed_ids,
compress_group_index, decons_obs_group_ids)
import pandas.core.algorithms as algos
from pandas._libs import algos as _algos, reshape as _reshape
from pandas.core.index import Index, MultiIndex
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
values : ndarray
Values of DataFrame to "Unstack"
index : object
Pandas ``Index``
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
value_columns : Index, optional
Pandas ``Index`` or ``MultiIndex`` object if unstacking a DataFrame
fill_value : scalar, optional
Default value to fill in missing values if subgroups do not have the
same set of labels. By default, missing values will be replaced with
the default fill value for that data type, NaN for float, NaT for
    datetimelike, etc. For integer types, by default data will be converted to
float and missing values will be set to NaN.
constructor : object
Pandas ``DataFrame`` or subclass used to create unstacked
response. If None, DataFrame or SparseDataFrame will be used.
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None, constructor=None):
self.is_categorical = None
self.is_sparse = is_sparse(values)
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
elif self.is_sparse:
# XXX: Makes SparseArray *dense*, but it's supposedly
# a single column at a time, so it's "doable"
values = values.values
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if constructor is None:
if self.is_sparse:
self.constructor = SparseDataFrame
else:
self.constructor = DataFrame
else:
self.constructor = constructor
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index.remove_unused_levels()
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(self.index.levels)
self.new_index_names = list(self.index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self.removed_level_full = index.levels[self.level]
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
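        # Editorial comment (not in the original source): ``selector`` is the
        # flat (row-major) position of each observation in the
        # (ngroups, stride) result grid: row = comp_index (compressed id of the
        # remaining levels), column = code of the unstacked level, with
        # ``lift`` shifting columns by one when that level contains NaN.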
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
values, _ = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# may need to coerce categoricals here
if self.is_categorical is not None:
categories = self.is_categorical.categories
ordered = self.is_categorical.ordered
values = [Categorical(values[:, i], categories=categories,
ordered=ordered)
for i in range(values.shape[-1])]
return self.constructor(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (self.sorted_values
.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view('i8')
new_values = new_values.view('i8')
name = 'int64'
elif is_bool_dtype(values):
sorted_values = sorted_values.astype('object')
new_values = new_values.astype('object')
name = 'object'
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
f = getattr(_reshape, "unstack_{name}".format(name=name))
f(sorted_values,
mask.view('u1'),
stride,
length,
width,
new_values,
new_mask.view('u1'))
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, lev._na_value)
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level_full,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level_full]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
# The two indices differ only if the unstacked level had unused items:
if len(self.removed_level_full) != len(self.removed_level):
# In this case, we remap the new labels to the original level:
repeater = self.removed_level_full.get_indexer(self.removed_level)
if self.lift:
repeater = np.insert(repeater, 0, -1)
else:
# Otherwise, we just use each level item exactly once:
repeater = np.arange(stride) - self.lift
# The entire level is then just a repetition of the single chunk:
new_labels.append(np.tile(repeater, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), lev._na_value)
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, labels=result_labels,
names=self.new_index_names, verify_integrity=False)
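# _unstack_multiple unstacks several index levels at once: the selected levels
# are compressed into a single synthetic '__placeholder__' level, that level is
# unstacked, and the column index is then rebuilt from the original levels.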
def _unstack_multiple(data, clocs, fill_value=None):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids, obs_ids, shape, clabels,
xnull=False)
if rlocs == []:
# Everything is in clocs, so the dummy df has a regular index
dummy_index = Index(obs_ids, name='__placeholder__')
else:
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [v if i > v else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__', fill_value=fill_value)
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
else:
if index is None:
index = self.index
else:
index = self[index]
index = MultiIndex.from_arrays([index, self[columns]])
if is_list_like(values) and not isinstance(values, tuple):
# Exclude tuple because it is seen as a single column name
indexed = self._constructor(self[values].values, index=index,
columns=values)
else:
indexed = self._constructor_sliced(self[values].values,
index=index)
return indexed.unstack(columns)
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
All three input arguments must have the same length
Returns
-------
DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sort_index(level=0)
return series.unstack()
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
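# unstack() dispatches on its input: a list/tuple of levels goes through
# _unstack_multiple, a DataFrame with a MultiIndex goes through _unstack_frame,
# a flat-indexed DataFrame is transposed and stacked, and a Series is handled
# directly by a single _Unstacker.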
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
if len(level) != 1:
# _unstack_multiple only handles MultiIndexes,
# and isn't needed for a single level
return _unstack_multiple(obj, level, fill_value=fill_value)
else:
level = level[0]
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value,
constructor=obj._constructor_expanddim)
return unstacker.get_result()
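# Mixed-dtype frames are unstacked block-by-block through the internal
# BlockManager (one _Unstacker per block); homogeneous frames go through a
# single _Unstacker over the whole values array.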
def _unstack_frame(obj, level, fill_value=None):
if obj._is_mixed_type:
unstacker = partial(_Unstacker, index=obj.index,
level=level, fill_value=fill_value)
blocks = obj._data.unstack(unstacker)
return obj._constructor(blocks)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
fill_value=fill_value,
constructor=obj._constructor)
return unstacker.get_result()
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = _factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index,
frame.columns)))
labels = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels, labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return frame._constructor_sliced(new_values, index=new_index)
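# stack_multiple() stacks several column levels in sequence; when the levels
# are given as integers, the remaining level numbers are decremented after each
# stack because stacking removes one column level.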
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level "
"numbers, not a mixture of the two.")
return result
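# _stack_multi_columns handles stacking when the columns are a MultiIndex: the
# chosen level is rolled to the innermost position, values are gathered per
# remaining column key, and the stacked level becomes a new innermost row level.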
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[lev.take(lab)
for lev, lab in zip(this.columns.levels[:-1],
this.columns.labels[:-1])]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
try:
loc = this.columns.get_loc(key)
except KeyError:
drop_cols.append(key)
continue
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.loc[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = frame._constructor(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False, dtype=None):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append to DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
.. versionadded:: 0.18.0
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
.. versionadded:: 0.23.0
Returns
-------
dummies : DataFrame or SparseDataFrame
Examples
--------
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> pd.get_dummies(pd.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
See Also
--------
Series.str.get_dummies
"""
from pandas.core.reshape.concat import concat
from itertools import cycle
dtypes_to_encode = ['object', 'category']
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
data_to_encode = data.select_dtypes(
include=dtypes_to_encode)
else:
data_to_encode = data[columns]
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
len_msg = ("Length of '{name}' ({len_item}) did not match the "
"length of the columns being encoded ({len_enc}).")
if is_list_like(item):
if not len(item) == data_to_encode.shape[1]:
len_msg = \
len_msg.format(name=name, len_item=len(item),
len_enc=data_to_encode.shape[1])
raise ValueError(len_msg)
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in data_to_encode.columns]
if prefix is None:
prefix = data_to_encode.columns
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in data_to_encode.columns]
if data_to_encode.shape == data.shape:
# Encoding the entire df, do not prepend any dropped columns
with_dummies = []
elif columns is not None:
# Encoding only cols specified in columns. Get all cols not in
# columns to prepend to result.
with_dummies = [data.drop(columns, axis=1)]
else:
# Encoding only object and category dtype columns. Get remaining
# columns to prepend to result.
with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)]
for (col, pre, sep) in zip(data_to_encode.iteritems(), prefix,
prefix_sep):
# col is (column_name, column), use just column data here
dummy = _get_dummies_1d(col[1], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first, dtype=dtype)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse,
drop_first=drop_first,
dtype=dtype)
return result
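# _get_dummies_1d encodes a single 1-D column: the values are factorized, an
# extra NaN level is appended when dummy_na=True, and the dummy matrix is built
# either as per-column SparseSeries or as a dense identity-matrix take.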
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False, dtype=None):
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
if dtype is None:
dtype = np.uint8
dtype = np.dtype(dtype)
if is_object_dtype(dtype):
raise ValueError("dtype=object is not a valid dtype for get_dummies")
def get_empty_Frame(data, sparse):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index, default_fill_value=0)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_Frame(data, sparse)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_Frame(data, sparse)
number_of_cols = len(levels)
if prefix is not None:
dummy_strs = [u'{prefix}{sep}{level}' if isinstance(v, text_type)
else '{prefix}{sep}{level}' for v in levels]
dummy_cols = [dummy_str.format(prefix=prefix, sep=prefix_sep, level=v)
for dummy_str, v in zip(dummy_strs, levels)]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [[] for _ in range(len(dummy_cols))]
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs), dtype=dtype),
sparse_index=IntIndex(N, ixs), fill_value=0,
dtype=dtype)
sparse_series[col] = SparseSeries(data=sarr, index=index)
out = SparseDataFrame(sparse_series, index=index, columns=dummy_cols,
default_fill_value=0,
dtype=dtype)
return out
else:
dummy_mat = np.eye(number_of_cols, dtype=dtype).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {'major': 0, 'minor': 1}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
labels, items = _factorize_from_iterable(mapped_items.take(labels))
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
|
bsd-3-clause
|
Tjorriemorrie/trading
|
19_rf_kelly/main.py
|
1
|
2523
|
import logging as log
import pandas as pd
import numpy as np
from sklearn.preprocessing import scale
from sklearn.cross_validation import train_test_split
from indicators import ewma, rsi
DATA = [
{'currency': 'AUDUSDe', 'timeframe': 1440},
{'currency': 'EURGBPe', 'timeframe': 1440},
{'currency': 'EURJPYe', 'timeframe': 1440},
{'currency': 'EURUSDe', 'timeframe': 1440},
{'currency': 'GBPJPYe', 'timeframe': 1440},
{'currency': 'GBPUSDe', 'timeframe': 1440},
{'currency': 'NZDUSDe', 'timeframe': 1440},
{'currency': 'USDCADe', 'timeframe': 1440},
{'currency': 'USDCHFe', 'timeframe': 1440},
{'currency': 'USDJPYe', 'timeframe': 1440},
]
def loadData(currency, timeframe):
log.info('Data: loading...')
df = pd.read_csv(
r'../data/{0}{1}.csv'.format(currency, timeframe),
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
parse_dates=[['date', 'time']],
index_col=0,
)
# print df
log.info('Data: {0} loaded'.format(len(df)))
return df
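# Label each bar 'long' when the next close is greater than or equal to the
# current close, otherwise 'short'.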
def getLabels(df):
log.info('Getting labels...')
tmp = df.copy()
tmp['label'] = tmp['close'].shift(-1)
tmp['label'] = tmp.apply(lambda x: 'long' if x['label'] - x['close'] >= 0 else 'short', axis=1)
log.info('Labels set')
return tmp['label']
def splitAndScale(df, labels):
log.info('Scaling features')
features = df.copy()
# drop
features.drop(['open', 'high', 'low', 'close', 'volume'], axis=1, inplace=True)
# split
X_train, X_test, y_train, y_test = train_test_split(features, labels)
# scale
X_train = scale(X_train, axis=0, copy=False)
X_test = scale(X_test, axis=0, copy=False)
log.info('Scaled features')
return X_train, X_test, y_train, y_test
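# Add pairwise EWMA ratio features (the EWMA of each period divided by the
# EWMA of every later period in the list); mutates df in place.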
def addEwma(df, fibos):
log.info('Adding EWMA {0}'.format(fibos))
ewmas = {}
for n in fibos:
ewmas[n] = ewma(df, 'close', n)
for i, n in enumerate(fibos):
for m in fibos[i+1:]:
df['ewma_{0}_{1}'.format(n, m)] = ewmas[n] / ewmas[m]
log.info('Added EWMA {0}'.format(fibos))
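# Add pairwise RSI ratio features in the same fashion; infinities from
# division by zero and any remaining NaNs are replaced with 0.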
def addRsi(df, fibos):
log.info('Adding RSI {0}'.format(fibos))
rsis = {}
for n in fibos:
rsis[n] = rsi(df, n)
for i, n in enumerate(fibos):
for m in fibos[i+1:]:
df['rsi_{0}_{1}'.format(n, m)] = rsis[n] / rsis[m]
df.replace(to_replace=[np.inf, -np.inf], value=0, method='ffill', inplace=True)
df.fillna(0, inplace=True)
log.info('Added RSI {0}'.format(fibos))
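# Hypothetical usage sketch (not part of the original pipeline): wires the
# helpers above together for each configured pair. The fibo periods below and
# the presence of the CSV files read by loadData are assumptions.
if __name__ == '__main__':
    log.basicConfig(level=log.INFO)
    fibos = [5, 8, 13, 21, 34]
    for item in DATA:
        df = loadData(item['currency'], item['timeframe'])
        addEwma(df, fibos)
        addRsi(df, fibos)
        labels = getLabels(df)
        X_train, X_test, y_train, y_test = splitAndScale(df, labels)
        log.info('{0}: train {1} rows, test {2} rows'.format(
            item['currency'], len(X_train), len(X_test)))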
|
mit
|
mworles/capstone_one
|
src/features/travel_distance.py
|
1
|
2386
|
import pandas as pd
import numpy as np
import re
from geopy.distance import great_circle
# read in data files
dpath = r"C:\Users\mworley\Dropbox\capstone\data"  # raw string so the Windows backslashes are not treated as escape sequences
tgames = pd.read_csv(dpath + r'\interim\tourney_games.csv')
tgames_cur = pd.read_csv(dpath + r'\interim\tourney_games_current.csv')
gameloc = pd.read_csv(dpath + r'\raw\TourneyGeog.csv')
teamloc = pd.read_csv(dpath + r'\raw\TeamGeog.csv')
tgames.columns = map(str.lower, tgames.columns)
gameloc.drop('daynum', axis=1, inplace=True)
gameloc.loc[gameloc.host == 'baton_rouge', ['lng']] = -91.19
gameloc_cur = gameloc[gameloc.season == 2017].copy()
gameloc_cur = pd.merge(gameloc_cur, tgames_cur, on='slot', copy=False)
gameloc_cur.rename(columns={'season_x': 'season'}, inplace=True)
kcols = ['season', 'w_team_id', 'l_team_id', 'slot', 'host', 'lat', 'lng']
gameloc_cur = gameloc_cur.loc[:, kcols]
gameloc = gameloc[gameloc.season != 2017]
gameloc.rename(columns={'wteam': 'w_team_id', 'lteam': 'l_team_id'},
inplace=True)
gameloc = pd.concat([gameloc, gameloc_cur])
tgames = tgames.drop(['daynum', 'w_score', 'l_score'], axis=1)
tgames_cur = tgames_cur.drop('slot', axis=1)
tgames = pd.concat([tgames, tgames_cur])
tgames = pd.merge(tgames, gameloc, how='inner',
on=['season', 'w_team_id', 'l_team_id'])
tgames.rename(columns={'lat': 'glat', 'lng': 'glng'}, inplace=True)
tgames = pd.merge(tgames, teamloc, how='inner',
left_on='w_team_id', right_on='team_id')
tgames.rename(columns={'lat': 'wlat', 'lng': 'wlng'}, inplace=True)
tgames = pd.merge(tgames, teamloc, how='inner',
left_on='l_team_id', right_on='team_id')
tgames.rename(columns={'lat': 'llat', 'lng': 'llng'}, inplace=True)
tgames['gloc'] = list(zip(tgames.glat, tgames.glng))
tgames['wloc'] = list(zip(tgames.wlat, tgames.wlng))
tgames['lloc'] = list(zip(tgames.llat, tgames.llng))
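# great-circle distance in miles from each game site to the winning and losing
# teams' home locations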
xl = []
yl = []
for i in range(len(tgames)):
x = int(great_circle(tgames['gloc'][i], tgames['wloc'][i]).miles)
y = int(great_circle(tgames['gloc'][i], tgames['lloc'][i]).miles)
xl.append(x)
yl.append(y)
tgames['w_dist'] = pd.Series(xl).values
tgames['l_dist'] = pd.Series(yl).values
tgames.sort_values(['season'], inplace=True)
tdist = tgames.loc[:, ['season', 'w_team_id', 'l_team_id', 'w_dist', 'l_dist']]
tdist.to_csv(dpath + r'\interim\tourney_dist.csv',
index=False, encoding='utf-8')
|
bsd-3-clause
|
toddstrader/deep-learning
|
weight-initialization/helper.py
|
153
|
3649
|
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
plt.hist(values, np.linspace(*hist_range, num=len(values)//2))
plt.show()
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements used for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
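# Hypothetical usage sketch (assumes the TF 1.x MNIST tutorial reader and the
# 256/128-unit hidden layers hard-coded in _get_loss_acc, i.e. 784 inputs and
# 10 one-hot classes). Guarded so nothing runs on import.
if __name__ == '__main__':
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    zero_weights = [tf.Variable(tf.zeros([784, 256])),
                    tf.Variable(tf.zeros([256, 128])),
                    tf.Variable(tf.zeros([128, 10]))]
    normal_weights = [tf.Variable(tf.truncated_normal([784, 256], stddev=0.1)),
                      tf.Variable(tf.truncated_normal([256, 128], stddev=0.1)),
                      tf.Variable(tf.truncated_normal([128, 10], stddev=0.1))]
    compare_init_weights(mnist, 'Zeros vs. truncated normal',
                         [(zero_weights, 'All zeros'),
                          (normal_weights, 'Truncated normal (stddev=0.1)')])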
|
mit
|