repo_name | path | copies | size | content | license
---|---|---|---|---|---|
vigilv/scikit-learn
|
examples/plot_digits_pipe.py
|
250
|
1809
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
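# Note (editorial): in scikit-learn 0.18+ GridSearchCV moved to sklearn.model_selection;
# the sklearn.grid_search module used here was deprecated and later removed.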
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
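# The selected settings are also available via estimator.best_params_,
# e.g. estimator.best_params_['pca__n_components'] and estimator.best_params_['logistic__C'].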
plt.legend(prop=dict(size=12))
plt.show()
|
bsd-3-clause
|
rcrowder/nupic.audio
|
HTMforGenreClassification/plot_spectrogram.py
|
2
|
4628
|
#!/usr/bin/env python
# This utility will plot beautiful spectrograms of your sound files. You will have to specify a number of parameters,
# but the good news is that the defaults are set so that they fit most people's needs.
#
# The parameters you have to set are:
# - Input file name
# - Frame step / Frame length (in samples)
# - Minimum and maximum frequency for analysis
# - Minimum and maximum time for analysis
# - Output width and height
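# Example invocation (hypothetical file name; flags match the argparse options defined below):
#   python plot_spectrogram.py --fname music.wav --flen 4096 --fstep 2048 --minfreq 50 --maxfreq 5000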
import argparse
import marsyas
import marsyas_util
import time
import numpy
import math
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Quickly plot beautiful spectrograms for your audio files.')
parser.add_argument('--fname', dest='Filename', type=str, default='test.wav', help='Filename from where data will be extracted')
parser.add_argument('--flen', dest='Window_len', type=int, default=2048, help='Length (samples) of the window for analysis')
parser.add_argument('--fstep', dest='Window_step', type=int, default=1024, help='Step (samples) of the sliding window used for analysis')
parser.add_argument('--minfreq', dest='Min_freq', type=float, default=110, help='Minimum frequency (Hz) shown in the spectrogram')
parser.add_argument('--maxfreq', dest='Max_freq', type=float, default=8000, help='Maximum frequency (Hz) shown in the spectrogram')
parser.add_argument('--maxtime', dest='Max_time', type=float, default=9000, help='Maximum time (s) shown in the spectrogram')
parser.add_argument('--zeropad', dest='Zero_padding', type=float, default=1, help='Zero padding factor (the DFT is calculated after zero-padding the input to this factor times the input length; use 1 for a standard DFT)')
parser.add_argument('--width', dest='Width', type=int, default=450, help='Width of the plot')
parser.add_argument('--height', dest='Height', type=int, default=200, help='Height of the plot')
parser.add_argument('--window', dest='Window', type=str, default='Hamming', help='Shape of the window that will be used to calculate the spectrogram')
args = parser.parse_args()
# Create our Marsyas network for audio analysis
spec_analyzer = ["Series/analysis", ["SoundFileSource/src", "Sum/summation", "Gain/gain", "ShiftInput/sft", "Windowing/win","Spectrum/spk","PowerSpectrum/pspk", "Memory/mem"]]
net = marsyas_util.create(spec_analyzer)
snet = marsyas_util.mar_refs(spec_analyzer)
# Configure the network
net.updControl(snet["src"]+"/mrs_string/filename", args.Filename)
nSamples = net.getControl(snet["src"]+"/mrs_natural/size").to_natural()
fs = net.getControl(snet["src"]+"/mrs_real/osrate").to_real()
dur = nSamples/fs
print "Opened ", args.Filename
print "It has ", nSamples, " samples at ", fs, " samples/second to a total of ", dur," seconds"
memFs = fs/args.Window_step # Sampling rate of the memory buffer
dur = min(dur, args.Max_time)
memSize = int(dur*memFs)
net.updControl("mrs_natural/inSamples", args.Window_step);
net.updControl(snet["gain"]+"/mrs_real/gain", args.Window_len*1.0); # This will un-normalize the DFT
net.updControl(snet["sft"]+"/mrs_natural/winSize", args.Window_len);
net.updControl(snet["win"]+"/mrs_natural/zeroPadding",args.Window_len * (args.Zero_padding-1));
net.updControl(snet["win"]+"/mrs_string/type", args.Window); # "Hamming", "Hanning", "Triangle", "Bartlett", "Blackman"
net.updControl(snet["pspk"]+"/mrs_string/spectrumType", "logmagnitude2"); # "power", "magnitude", "decibels", "logmagnitude" (for 1+log(magnitude*1000), "logmagnitude2" (for 1+log10(magnitude)), "powerdensity"
net.updControl(snet["mem"]+"/mrs_natural/memSize", memSize)
# Run the network to fill the memory
for i in range(memSize):
net.tick()
# Gather results to a numpy array
out = net.getControl("mrs_realvec/processedData").to_realvec()
DFT_Size = int(len(out)*1.0/memSize)
if numpy.ndim(out)==1:
out = numpy.array([out])
out = numpy.reshape(out,(memSize, DFT_Size))
out = numpy.transpose(out)
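# out now has shape (DFT_Size, memSize): rows are DFT bins, columns are analysis frames.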
# Cut information that we do not want
minK = int(args.Min_freq*DFT_Size/fs) # integer bin index of the minimum frequency
maxK = int(args.Max_freq*DFT_Size/fs) # integer bin index of the maximum frequency
out = out[minK:maxK+1]
out = out/numpy.max(out)
out = 1-out
# Plot ALL the numbers!!!
im=plt.imshow(out, aspect='auto', origin='lower', cmap=plt.cm.autumn, extent=[0,dur,args.Min_freq,args.Max_freq])
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
fig = plt.gcf()
width_inches = args.Width/80.0
height_inches = args.Height/80.0
fig.set_size_inches((width_inches,height_inches))
#plt.savefig('out.png',bbox_inches='tight')
#plt.savefig('out.pdf',bbox_inches='tight')
plt.show()
|
gpl-3.0
|
joergdietrich/astropy
|
astropy/visualization/tests/test_units.py
|
2
|
1275
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
try:
import matplotlib.pyplot as plt
except ImportError:
HAS_PLT = False
else:
HAS_PLT = True
from ...tests.helper import pytest
from ... import units as u
from ..units import quantity_support
@pytest.mark.skipif('not HAS_PLT')
def test_units():
plt.figure()
with quantity_support():
buff = io.BytesIO()
plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg)
plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g)
# Also test fill_between, which requires actual conversion to ndarray
# with numpy >=1.10 (#4654).
plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g)
plt.savefig(buff, format='svg')
assert plt.gca().xaxis.get_units() == u.m
assert plt.gca().yaxis.get_units() == u.kg
plt.clf()
@pytest.mark.skipif('not HAS_PLT')
def test_incompatible_units():
plt.figure()
with quantity_support():
plt.plot([1, 2, 3] * u.m)
with pytest.raises(u.UnitConversionError):
plt.plot([105, 210, 315] * u.kg)
plt.clf()
|
bsd-3-clause
|
xiaohan2012/interaction-network-exploration
|
main.py
|
1
|
1836
|
import matplotlib as mpl
mpl.use('Agg')
from collections import Counter, defaultdict
from pprint import pprint
from datetime import datetime
import numpy as np
from matplotlib import pyplot as plt
from pymongo import MongoClient
TAG_PREFIX = 'topics/companies/'
db = MongoClient()['bloomberg']
total_tag_count = 0
tag_freq = Counter()
# Company frequency
for a in db.articles.find():
total_tag_count += len(a['tags'])
for tag in a['tags']:
if tag.startswith(TAG_PREFIX):
tag_freq[tag] += 1
print('-' * 100)
print("Top 100 companies ranked by frequency:")
print('-' * 100)
pprint(tag_freq.most_common(100))
tags_to_be_considered = set([k for k, _ in tag_freq.most_common(10)])
datetime_by_tags = defaultdict(list)
# Company stacked area graph
for a in db.articles.find():
day_of_year = datetime.fromtimestamp(a['publish_time']).timetuple().tm_yday
for tag in a['tags']:
if tag in tags_to_be_considered:
datetime_by_tags[tag].append(day_of_year / 30)
rows = np.zeros((len(tags_to_be_considered), 12))
id2tag = {}
for i, (tag, months) in enumerate(datetime_by_tags.items()):
id2tag[i] = tag
for month in months:
rows[i][month] += 1
rows = np.cumsum(rows, axis=0)
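# rows is now cumulative over tags: row i holds the stacked upper boundary for the first i+1 tags.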
# PLOT
fig = plt.figure()
ax = fig.add_subplot(111)
x = np.arange(12)
colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99',
'#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a']
for i in xrange(len(tags_to_be_considered)):
if i == 0:
y1 = np.zeros(x.shape)
else:
y1 = rows[i-1, :]
y2 = rows[i, :]
ax.fill_between(x, y1, y2,
facecolor=colors[i], alpha=.7)
ax.text(2.5, (y1[3]+y2[3]) / 2, id2tag[i].split('/')[-1])
print(rows)
plt.savefig('/cs/fs/home/hxiao/public_html/company_frequency_stream.png')
|
mit
|
almartin82/hpk-daily
|
historic_stats_2015.py
|
1
|
1113
|
import yahoo_api
import yaml
import functions
import resources
from datetime import date, timedelta
import pandas
#read in credentials
with open("credentials.yml", 'r') as ymlfile:
creds = yaml.load(ymlfile)
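# Note (editorial): newer PyYAML releases warn here without an explicit Loader;
# yaml.safe_load(ymlfile) is the safer equivalent for a plain credentials file.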
#read the consumer key and secret from yaml
key = creds['consumer_key']
secret = creds['consumer_secret']
#initialize a yahoo session
y = yahoo_api.YahooAPI(
consumer_key=creds['consumer_key'],
consumer_secret=creds['consumer_secret'],
access_token=creds['access_token'],
access_token_secret=creds['access_token_secret'],
session_handle=creds['session_handle']
)
d = resources.yr_2015
dd = [d[0] + timedelta(days=x) for x in range((d[1]-d[0]).days + 1)]
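# dd is every calendar date from d[0] through d[1], inclusive (one entry per day).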
# dd = [date(2012, 9, 8) + timedelta(days=x) for x in range((d[1]-date(2012, 9, 8)).days + 1)]
stat_df = pandas.DataFrame()
for day in dd:
print day
for team in resources.hpk_teams_cur:
r = functions.make_daily_stats_req(team, day)
raw = y.api_query(r)
df = functions.process_team_stats(raw)
stat_df = stat_df.append(df)
stat_df.to_csv('data\\team_by_date_2015.csv', index=False)
|
mit
|
chhao91/pysal
|
pysal/contrib/pdutilities/dbf_utilities.py
|
3
|
6673
|
"""miscellaneous file manipulation utilities
"""
import numpy as np
import pysal as ps
import pandas as pd
def check_dups(li):
"""checks duplicates in list of ID values
ID values must be read in as a list
__author__ = "Luc Anselin <[email protected]> "
Arguments
---------
li : list of ID values
Returns
-------
a list with the duplicate IDs
"""
return list(set([x for x in li if li.count(x) > 1]))
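# Usage sketch (hypothetical input): check_dups([1, 2, 2, 3, 3]) -> [2, 3] (result order is not guaranteed).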
def dbfdups(dbfpath,idvar):
"""checks duplicates in a dBase file
ID variable must be specified correctly
__author__ = "Luc Anselin <[email protected]> "
Arguments
---------
dbfpath : file path to dBase file
idvar : ID variable in dBase file
Returns
-------
a list with the duplicate IDs
"""
db = ps.open(dbfpath,'r')
li = db.by_col(idvar)
return list(set([x for x in li if li.count(x) > 1]))
def df2dbf(df, dbf_path, my_specs=None):
'''
Convert a pandas.DataFrame into a dbf.
__author__ = "Dani Arribas-Bel <[email protected]>, Luc Anselin <[email protected]>"
...
Arguments
---------
df : DataFrame
Pandas dataframe object to be entirely written out to a dbf
dbf_path : str
Path to the output dbf. It is also returned by the function
my_specs : list
List with the field_specs to use for each column.
Defaults to None and applies the following scheme:
* int: ('N', 14, 0) - for all ints
* float: ('N', 14, 14) - for all floats
* str: ('C', 14, 0) - for string, object and category
with all variants for different type sizes
Note: use of dtypes.name may not be fully robust, but the preferred approach of using
isinstance seems too clumsy
'''
if my_specs:
specs = my_specs
else:
"""
type2spec = {int: ('N', 20, 0),
np.int64: ('N', 20, 0),
np.int32: ('N', 20, 0),
np.int16: ('N', 20, 0),
np.int8: ('N', 20, 0),
float: ('N', 36, 15),
np.float64: ('N', 36, 15),
np.float32: ('N', 36, 15),
str: ('C', 14, 0)
}
types = [type(df[i].iloc[0]) for i in df.columns]
"""
# new approach using dtypes.name to avoid numpy name issue in type
type2spec = {'int': ('N', 20, 0),
'int8': ('N', 20, 0),
'int16': ('N', 20, 0),
'int32': ('N', 20, 0),
'int64': ('N', 20, 0),
'float': ('N', 36, 15),
'float32': ('N', 36, 15),
'float64': ('N', 36, 15),
'str': ('C', 14, 0),
'object': ('C', 14, 0),
'category': ('C', 14, 0)
}
types = [df[i].dtypes.name for i in df.columns]
specs = [type2spec[t] for t in types]
db = ps.open(dbf_path, 'w')
db.header = list(df.columns)
db.field_spec = specs
for i, row in df.T.iteritems():
db.write(row)
db.close()
return dbf_path
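# Hypothetical usage sketch: df2dbf(pd.DataFrame({'id': [1, 2], 'val': [0.5, 1.5]}), 'points.dbf')
# writes points.dbf using the default field specs above and returns its path.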
def dbf2df(dbf_path, index=None, cols=False, incl_index=False):
'''
Read a dbf file as a pandas.DataFrame, optionally selecting the index
variable and which columns are to be loaded.
__author__ = "Dani Arribas-Bel <[email protected]> "
...
Arguments
---------
dbf_path : str
Path to the DBF file to be read
index : str
Name of the column to be used as the index of the DataFrame
cols : list
List with the names of the columns to be read into the
DataFrame. Defaults to False, which reads the whole dbf
incl_index : Boolean
If True index is included in the DataFrame as a
column too. Defaults to False
Returns
-------
df : DataFrame
pandas.DataFrame object created
'''
db = ps.open(dbf_path)
if cols:
if incl_index:
cols.append(index)
vars_to_read = cols
else:
vars_to_read = db.header
data = dict([(var, db.by_col(var)) for var in vars_to_read])
if index:
index = db.by_col(index)
db.close()
return pd.DataFrame(data, index=index, columns=vars_to_read)
else:
db.close()
return pd.DataFrame(data,columns=vars_to_read)
def dbfjoin(dbf1_path,dbf2_path,out_path,joinkey1,joinkey2):
'''
Wrapper function to merge two dbf files into a new dbf file.
__author__ = "Luc Anselin <[email protected]> "
Uses dbf2df and df2dbf to read and write the dbf files into a pandas
DataFrame. Uses all default settings for dbf2df and df2dbf (see docs
for specifics).
...
Arguments
---------
dbf1_path : str
Path to the first (left) dbf file
dbf2_path : str
Path to the second (right) dbf file
out_path : str
Path to the output dbf file (returned by the function)
joinkey1 : str
Variable name for the key in the first dbf. Must be specified.
Key must take unique values.
joinkey2 : str
Variable name for the key in the second dbf. Must be specified.
Key must take unique values.
Returns
-------
dbfpath : path to output file
'''
df1 = dbf2df(dbf1_path,index=joinkey1)
df2 = dbf2df(dbf2_path,index=joinkey2)
dfbig = pd.merge(df1,df2,left_on=joinkey1,right_on=joinkey2,sort=False)
dp = df2dbf(dfbig,out_path)
return dp
def dta2dbf(dta_path,dbf_path):
"""
Wrapper function to convert a stata dta file into a dbf file.
__author__ = "Luc Anselin <[email protected]> "
Uses df2dbf to write the dbf files from a pandas
DataFrame. Uses all default settings for df2dbf (see docs
for specifics).
...
Arguments
---------
dta_path : str
Path to the Stata dta file
dbf_path : str
Path to the output dbf file
Returns
-------
dbf_path : path to output file
"""
db = pd.read_stata(dta_path)
dp = df2dbf(db,dbf_path)
return dp
|
bsd-3-clause
|
frank-tancf/scikit-learn
|
examples/decomposition/plot_sparse_coding.py
|
27
|
4037
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size; heavier subsampling is
performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / (np.sqrt(3 * width) * np.pi ** 0.25))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
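# Normalize each atom (row) of the dictionary to unit l2 norm.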
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs, color)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
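# Debias: re-fit the surviving nonzero coefficients by least squares to undo the shrinkage introduced by thresholding.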
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
|
bsd-3-clause
|
MJuddBooth/pandas
|
scripts/tests/test_validate_docstrings.py
|
1
|
34703
|
import io
import random
import string
import textwrap
import pytest
import numpy as np
import pandas as pd
import validate_docstrings
validate_one = validate_docstrings.validate_one
class GoodDocStrings(object):
"""
Collection of good doc strings.
This class contains a lot of docstrings that should pass the validation
script without any errors.
"""
def plot(self, kind, color='blue', **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Parameters
----------
kind : str
Kind of matplotlib plot.
color : str, default 'blue'
Color name or rgb code.
**kwargs
These parameters will be passed to the matplotlib plotting
function.
"""
pass
def sample(self):
"""
Generate and return a random number.
The value is sampled from a continuous uniform distribution between
0 and 1.
Returns
-------
float
Random number generated.
"""
return random.random()
def random_letters(self):
"""
Generate and return a sequence of random letters.
The length of the returned string is also random, and is also
returned.
Returns
-------
length : int
Length of the returned string.
letters : str
String of random letters.
"""
length = random.randint(1, 10)
letters = "".join(random.sample(string.ascii_lowercase, length))
return length, letters
def sample_values(self):
"""
Generate an infinite sequence of random numbers.
The values are sampled from a continuous uniform distribution between
0 and 1.
Yields
------
float
Random number generated.
"""
while True:
yield random.random()
def head(self):
"""
Return the first 5 elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Returns
-------
Series
Subset of the original series with the 5 first values.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
return self.iloc[:5]
def head1(self, n=5):
"""
Return the first elements of the Series.
This function is mainly useful to preview the values of the
Series without displaying the whole of it.
Parameters
----------
n : int
Number of values to return.
Returns
-------
Series
Subset of the original series with the n first values.
See Also
--------
tail : Return the last n elements of the Series.
Examples
--------
>>> s = pd.Series(['Ant', 'Bear', 'Cow', 'Dog', 'Falcon'])
>>> s.head()
0 Ant
1 Bear
2 Cow
3 Dog
4 Falcon
dtype: object
With the `n` parameter, we can change the number of returned rows:
>>> s.head(n=3)
0 Ant
1 Bear
2 Cow
dtype: object
"""
return self.iloc[:n]
def contains(self, pat, case=True, na=np.nan):
"""
Return whether each value contains `pat`.
In this case, we are illustrating how to use sections, even
if the example is simple enough and does not require them.
Parameters
----------
pat : str
Pattern to check for within each element.
case : bool, default True
Whether check should be done with case sensitivity.
na : object, default np.nan
Fill value for missing data.
Examples
--------
>>> s = pd.Series(['Antelope', 'Lion', 'Zebra', np.nan])
>>> s.str.contains(pat='a')
0 False
1 False
2 True
3 NaN
dtype: object
**Case sensitivity**
With `case` set to `False` we can match `a` with both
`a` and `A`:
>>> s.str.contains(pat='a', case=False)
0 True
1 False
2 True
3 NaN
dtype: object
**Missing values**
We can fill missing values in the output using the `na` parameter:
>>> s.str.contains(pat='a', na=False)
0 False
1 False
2 True
3 False
dtype: bool
"""
pass
def mode(self, axis, numeric_only):
"""
Ensure sphinx directives don't affect checks for trailing periods.
Parameters
----------
axis : str
Sentence ending in period, followed by single directive.
.. versionchanged:: 0.1.2
numeric_only : bool
Sentence ending in period, followed by multiple directives.
.. versionadded:: 0.1.2
.. deprecated:: 0.00.0
A multiline description,
which spans another line.
"""
pass
def good_imports(self):
"""
Ensure import other than numpy and pandas are fine.
Examples
--------
This example does not import pandas or import numpy.
>>> import datetime
>>> datetime.MAXYEAR
9999
"""
pass
class BadGenericDocStrings(object):
"""Everything here has a bad docstring
"""
def func(self):
"""Some function.
With several mistakes in the docstring.
It has a blank line after the signature `def func():`.
The text 'Some function' should go in the line after the
opening quotes of the docstring, not in the same line.
There is a blank line between the docstring and the first line
of code `foo = 1`.
The closing quotes should be in the next line, not in this one."""
foo = 1
bar = 2
return foo + bar
def astype(self, dtype):
"""
Casts Series type.
Verb in third-person of the present simple, should be infinitive.
"""
pass
def astype1(self, dtype):
"""
Method to cast Series type.
Does not start with verb.
"""
pass
def astype2(self, dtype):
"""
Cast Series type
Missing dot at the end.
"""
pass
def astype3(self, dtype):
"""
Cast Series type from its current type to the new type defined in
the parameter dtype.
Summary is too verbose and doesn't fit in a single line.
"""
pass
def two_linebreaks_between_sections(self, foo):
"""
Test linebreaks message GL03.
Note 2 blank lines before parameters section.
Parameters
----------
foo : str
Description of foo parameter.
"""
pass
def linebreak_at_end_of_docstring(self, foo):
"""
Test linebreaks message GL03.
Note extra blank line at end of docstring.
Parameters
----------
foo : str
Description of foo parameter.
"""
pass
def plot(self, kind, **kwargs):
"""
Generate a plot.
Render the data in the Series as a matplotlib plot of the
specified kind.
Note the blank line between the parameters title and the first
parameter. Also, note that after the name of the parameter `kind`
and before the colon, a space is missing.
Also, note that the parameter descriptions do not start with a
capital letter, and do not finish with a dot.
Finally, the `**kwargs` parameter is missing.
Parameters
----------
kind: str
kind of matplotlib plot
"""
pass
def method(self, foo=None, bar=None):
"""
A sample DataFrame method.
Do not import numpy and pandas.
Try to use meaningful data, when it makes the example easier
to understand.
Try to avoid positional arguments like in `df.method(1)`. They
can be alright if previously defined with a meaningful name,
like in `present_value(interest_rate)`, but avoid them otherwise.
When presenting the behavior with different parameters, do not place
all the calls one next to the other. Instead, add a short sentence
explaining what the example shows.
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame(np.ones((3, 3)),
... columns=('a', 'b', 'c'))
>>> df.all(1)
0 True
1 True
2 True
dtype: bool
>>> df.all(bool_only=True)
Series([], dtype: bool)
"""
pass
def private_classes(self):
"""
This mentions NDFrame, which is not correct.
"""
def unknown_section(self):
"""
This section has an unknown section title.
Unknown Section
---------------
This should raise an error in the validation.
"""
def sections_in_wrong_order(self):
"""
This docstring has the sections in the wrong order.
Parameters
----------
name : str
This section is in the right position.
Examples
--------
>>> print('So far Examples is good, as it goes before Parameters')
So far Examples is good, as it goes before Parameters
See Also
--------
function : This should generate an error, as See Also needs to go
before Examples.
"""
def deprecation_in_wrong_order(self):
"""
This docstring has the deprecation warning in the wrong order.
This is the extended summary. The correct order should be
summary, deprecation warning, extended summary.
.. deprecated:: 1.0
This should generate an error as it needs to go before
extended summary.
"""
def method_wo_docstrings(self):
pass
class BadSummaries(object):
def wrong_line(self):
"""Exists on the wrong line"""
pass
def no_punctuation(self):
"""
Has the right line but forgets punctuation
"""
pass
def no_capitalization(self):
"""
provides a lowercase summary.
"""
pass
def no_infinitive(self):
"""
Started with a verb that is not infinitive.
"""
def multi_line(self):
"""
Extends beyond one line
which is not correct.
"""
def two_paragraph_multi_line(self):
"""
Extends beyond one line
which is not correct.
Extends beyond one line, which in itself is correct but the
previous short summary should still be an issue.
"""
class BadParameters(object):
"""
Everything here has a problem with its Parameters section.
"""
def missing_params(self, kind, **kwargs):
"""
Lacks kwargs in Parameters.
Parameters
----------
kind : str
Foo bar baz.
"""
def bad_colon_spacing(self, kind):
"""
Has bad spacing in the type line.
Parameters
----------
kind: str
Needs a space after kind.
"""
def no_description_period(self, kind):
"""
Forgets to add a period to the description.
Parameters
----------
kind : str
Doesn't end with a dot
"""
def no_description_period_with_directive(self, kind):
"""
Forgets to add a period, and also includes a directive.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionadded:: 0.00.0
"""
def no_description_period_with_directives(self, kind):
"""
Forgets to add a period, and also includes multiple directives.
Parameters
----------
kind : str
Doesn't end with a dot
.. versionchanged:: 0.00.0
.. deprecated:: 0.00.0
"""
def parameter_capitalization(self, kind):
"""
Forgets to capitalize the description.
Parameters
----------
kind : str
this is not capitalized.
"""
def blank_lines(self, kind):
"""
Adds a blank line after the section header.
Parameters
----------
kind : str
Foo bar baz.
"""
pass
def integer_parameter(self, kind):
"""
Uses integer instead of int.
Parameters
----------
kind : integer
Foo bar baz.
"""
pass
def string_parameter(self, kind):
"""
Uses string instead of str.
Parameters
----------
kind : string
Foo bar baz.
"""
pass
def boolean_parameter(self, kind):
"""
Uses boolean instead of bool.
Parameters
----------
kind : boolean
Foo bar baz.
"""
pass
def list_incorrect_parameter_type(self, kind):
"""
Uses list of boolean instead of list of bool.
Parameters
----------
kind : list of boolean, integer, float or string
Foo bar baz.
"""
pass
class BadReturns(object):
def return_not_documented(self):
"""
Lacks section for Returns
"""
return "Hello world!"
def yield_not_documented(self):
"""
Lacks section for Yields
"""
yield "Hello world!"
def no_type(self):
"""
Returns documented but without type.
Returns
-------
Some value.
"""
return "Hello world!"
def no_description(self):
"""
Provides type but no description.
Returns
-------
str
"""
return "Hello world!"
def no_punctuation(self):
"""
Provides type and description but no period.
Returns
-------
str
A nice greeting
"""
return "Hello world!"
def named_single_return(self):
"""
Provides name but returns only one value.
Returns
-------
s : str
A nice greeting.
"""
return "Hello world!"
def no_capitalization(self):
"""
Forgets capitalization in return values description.
Returns
-------
foo : str
The first returned string.
bar : str
the second returned string.
"""
return "Hello", "World!"
def no_period_multi(self):
"""
Forgets period in return values description.
Returns
-------
foo : str
The first returned string
bar : str
The second returned string.
"""
return "Hello", "World!"
class BadSeeAlso(object):
def desc_no_period(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : Return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n
"""
pass
def desc_first_letter_lowercase(self):
"""
Return the first 5 elements of the Series.
See Also
--------
Series.tail : return the last 5 elements of the Series.
Series.iloc : Return a slice of the elements in the Series,
which can also be used to return the first or last n.
"""
pass
def prefix_pandas(self):
"""
Have `pandas` prefix in See Also section.
See Also
--------
pandas.Series.rename : Alter Series index labels or name.
DataFrame.head : The first `n` rows of the caller object.
"""
pass
class BadExamples(object):
def unused_import(self):
"""
Examples
--------
>>> import pandas as pdf
>>> df = pd.DataFrame(np.ones((3, 3)), columns=('a', 'b', 'c'))
"""
pass
def missing_whitespace_around_arithmetic_operator(self):
"""
Examples
--------
>>> 2+5
7
"""
pass
def indentation_is_not_a_multiple_of_four(self):
"""
Examples
--------
>>> if 2 + 5:
... pass
"""
pass
def missing_whitespace_after_comma(self):
"""
Examples
--------
>>> df = pd.DataFrame(np.ones((3,3)),columns=('a','b', 'c'))
"""
pass
class TestValidator(object):
def _import_path(self, klass=None, func=None):
"""
Build the required import path for tests in this module.
Parameters
----------
klass : str
Class name of object in module.
func : str
Function name of object in module.
Returns
-------
str
Import path of specified object in this module
"""
base_path = "scripts.tests.test_validate_docstrings"
if klass:
base_path = ".".join([base_path, klass])
if func:
base_path = ".".join([base_path, func])
return base_path
def test_good_class(self, capsys):
errors = validate_one(self._import_path(
klass='GoodDocStrings'))['errors']
assert isinstance(errors, list)
assert not errors
@pytest.mark.parametrize("func", [
'plot', 'sample', 'random_letters', 'sample_values', 'head', 'head1',
'contains', 'mode', 'good_imports'])
def test_good_functions(self, capsys, func):
errors = validate_one(self._import_path(
klass='GoodDocStrings', func=func))['errors']
assert isinstance(errors, list)
assert not errors
def test_bad_class(self, capsys):
errors = validate_one(self._import_path(
klass='BadGenericDocStrings'))['errors']
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize("func", [
'func', 'astype', 'astype1', 'astype2', 'astype3', 'plot', 'method',
'private_classes',
])
def test_bad_generic_functions(self, capsys, func):
errors = validate_one(self._import_path( # noqa:F821
klass='BadGenericDocStrings', func=func))['errors']
assert isinstance(errors, list)
assert errors
@pytest.mark.parametrize("klass,func,msgs", [
# See Also tests
('BadGenericDocStrings', 'private_classes',
("Private classes (NDFrame) should not be mentioned in public "
'docstrings',)),
('BadGenericDocStrings', 'unknown_section',
('Found unknown section "Unknown Section".',)),
('BadGenericDocStrings', 'sections_in_wrong_order',
('Sections are in the wrong order. Correct order is: Parameters, '
'See Also, Examples',)),
('BadGenericDocStrings', 'deprecation_in_wrong_order',
('Deprecation warning should precede extended summary',)),
('BadSeeAlso', 'desc_no_period',
('Missing period at end of description for See Also "Series.iloc"',)),
('BadSeeAlso', 'desc_first_letter_lowercase',
('should be capitalized for See Also "Series.tail"',)),
# Summary tests
('BadSummaries', 'wrong_line',
('should start in the line immediately after the opening quotes',)),
('BadSummaries', 'no_punctuation',
('Summary does not end with a period',)),
('BadSummaries', 'no_capitalization',
('Summary does not start with a capital letter',)),
('BadSummaries', 'no_capitalization',
('Summary must start with infinitive verb',)),
('BadSummaries', 'multi_line',
('Summary should fit in a single line',)),
('BadSummaries', 'two_paragraph_multi_line',
('Summary should fit in a single line',)),
# Parameters tests
('BadParameters', 'missing_params',
('Parameters {**kwargs} not documented',)),
('BadParameters', 'bad_colon_spacing',
('Parameter "kind" requires a space before the colon '
'separating the parameter name and type',)),
('BadParameters', 'no_description_period',
('Parameter "kind" description should finish with "."',)),
('BadParameters', 'no_description_period_with_directive',
('Parameter "kind" description should finish with "."',)),
('BadParameters', 'parameter_capitalization',
('Parameter "kind" description should start with a capital letter',)),
('BadParameters', 'integer_parameter',
('Parameter "kind" type should use "int" instead of "integer"',)),
('BadParameters', 'string_parameter',
('Parameter "kind" type should use "str" instead of "string"',)),
('BadParameters', 'boolean_parameter',
('Parameter "kind" type should use "bool" instead of "boolean"',)),
('BadParameters', 'list_incorrect_parameter_type',
('Parameter "kind" type should use "bool" instead of "boolean"',)),
('BadParameters', 'list_incorrect_parameter_type',
('Parameter "kind" type should use "int" instead of "integer"',)),
('BadParameters', 'list_incorrect_parameter_type',
('Parameter "kind" type should use "str" instead of "string"',)),
pytest.param('BadParameters', 'blank_lines', ('No error yet?',),
marks=pytest.mark.xfail),
# Returns tests
('BadReturns', 'return_not_documented', ('No Returns section found',)),
('BadReturns', 'yield_not_documented', ('No Yields section found',)),
pytest.param('BadReturns', 'no_type', ('foo',),
marks=pytest.mark.xfail),
('BadReturns', 'no_description',
('Return value has no description',)),
('BadReturns', 'no_punctuation',
('Return value description should finish with "."',)),
('BadReturns', 'named_single_return',
('The first line of the Returns section should contain only the '
'type, unless multiple values are being returned',)),
('BadReturns', 'no_capitalization',
('Return value description should start with a capital '
'letter',)),
('BadReturns', 'no_period_multi',
('Return value description should finish with "."',)),
# Examples tests
('BadGenericDocStrings', 'method',
('Do not import numpy, as it is imported automatically',)),
('BadGenericDocStrings', 'method',
('Do not import pandas, as it is imported automatically',)),
('BadGenericDocStrings', 'method_wo_docstrings',
("The object does not have a docstring",)),
# See Also tests
('BadSeeAlso', 'prefix_pandas',
('pandas.Series.rename in `See Also` section '
'does not need `pandas` prefix',)),
# Examples tests
('BadExamples', 'unused_import',
("flake8 error: F401 'pandas as pdf' imported but unused",)),
('BadExamples', 'indentation_is_not_a_multiple_of_four',
('flake8 error: E111 indentation is not a multiple of four',)),
('BadExamples', 'missing_whitespace_around_arithmetic_operator',
('flake8 error: '
'E226 missing whitespace around arithmetic operator',)),
('BadExamples', 'missing_whitespace_after_comma',
("flake8 error: E231 missing whitespace after ',' (3 times)",)),
('BadGenericDocStrings', 'two_linebreaks_between_sections',
('Double line break found; please use only one blank line to '
'separate sections or paragraphs, and do not leave blank lines '
'at the end of docstrings',)),
('BadGenericDocStrings', 'linebreak_at_end_of_docstring',
('Double line break found; please use only one blank line to '
'separate sections or paragraphs, and do not leave blank lines '
'at the end of docstrings',)),
])
def test_bad_docstrings(self, capsys, klass, func, msgs):
result = validate_one(self._import_path(klass=klass, func=func))
for msg in msgs:
assert msg in ' '.join(err[1] for err in result['errors'])
def test_validate_all_ignore_deprecated(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings, 'validate_one', lambda func_name: {
'docstring': 'docstring1',
'errors': [('ER01', 'err desc'),
('ER02', 'err desc'),
('ER03', 'err desc')],
'warnings': [],
'examples_errors': '',
'deprecated': True})
result = validate_docstrings.validate_all(prefix=None,
ignore_deprecated=True)
assert len(result) == 0
class TestApiItems(object):
@property
def api_doc(self):
return io.StringIO(textwrap.dedent('''
.. currentmodule:: itertools
Itertools
---------
Infinite
~~~~~~~~
.. autosummary::
cycle
count
Finite
~~~~~~
.. autosummary::
chain
.. currentmodule:: random
Random
------
All
~~~
.. autosummary::
seed
randint
'''))
@pytest.mark.parametrize('idx,name', [(0, 'itertools.cycle'),
(1, 'itertools.count'),
(2, 'itertools.chain'),
(3, 'random.seed'),
(4, 'random.randint')])
def test_item_name(self, idx, name):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][0] == name
@pytest.mark.parametrize('idx,func', [(0, 'cycle'),
(1, 'count'),
(2, 'chain'),
(3, 'seed'),
(4, 'randint')])
def test_item_function(self, idx, func):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert callable(result[idx][1])
assert result[idx][1].__name__ == func
@pytest.mark.parametrize('idx,section', [(0, 'Itertools'),
(1, 'Itertools'),
(2, 'Itertools'),
(3, 'Random'),
(4, 'Random')])
def test_item_section(self, idx, section):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][2] == section
@pytest.mark.parametrize('idx,subsection', [(0, 'Infinite'),
(1, 'Infinite'),
(2, 'Finite'),
(3, 'All'),
(4, 'All')])
def test_item_subsection(self, idx, subsection):
result = list(validate_docstrings.get_api_items(self.api_doc))
assert result[idx][3] == subsection
class TestDocstringClass(object):
@pytest.mark.parametrize('name, expected_obj',
[('pandas.isnull', pd.isnull),
('pandas.DataFrame', pd.DataFrame),
('pandas.Series.sum', pd.Series.sum)])
def test_resolves_class_name(self, name, expected_obj):
d = validate_docstrings.Docstring(name)
assert d.obj is expected_obj
@pytest.mark.parametrize('invalid_name', ['panda', 'panda.DataFrame'])
def test_raises_for_invalid_module_name(self, invalid_name):
msg = 'No module can be imported from "{}"'.format(invalid_name)
with pytest.raises(ImportError, match=msg):
validate_docstrings.Docstring(invalid_name)
@pytest.mark.parametrize('invalid_name',
['pandas.BadClassName',
'pandas.Series.bad_method_name'])
def test_raises_for_invalid_attribute_name(self, invalid_name):
name_components = invalid_name.split('.')
obj_name, invalid_attr_name = name_components[-2], name_components[-1]
msg = "'{}' has no attribute '{}'".format(obj_name, invalid_attr_name)
with pytest.raises(AttributeError, match=msg):
validate_docstrings.Docstring(invalid_name)
class TestMainFunction(object):
def test_exit_status_for_validate_one(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings, 'validate_one', lambda func_name: {
'docstring': 'docstring1',
'errors': [('ER01', 'err desc'),
('ER02', 'err desc'),
('ER03', 'err desc')],
'warnings': [],
'examples_errors': ''})
exit_status = validate_docstrings.main(func_name='docstring1',
prefix=None,
errors=[],
output_format='default',
ignore_deprecated=False)
assert exit_status == 0
def test_exit_status_errors_for_validate_all(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings, 'validate_all',
lambda prefix, ignore_deprecated=False: {
'docstring1': {'errors': [('ER01', 'err desc'),
('ER02', 'err desc'),
('ER03', 'err desc')],
'file': 'module1.py',
'file_line': 23},
'docstring2': {'errors': [('ER04', 'err desc'),
('ER05', 'err desc')],
'file': 'module2.py',
'file_line': 925}})
exit_status = validate_docstrings.main(func_name=None,
prefix=None,
errors=[],
output_format='default',
ignore_deprecated=False)
assert exit_status == 5
def test_no_exit_status_noerrors_for_validate_all(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings, 'validate_all',
lambda prefix, ignore_deprecated=False: {
'docstring1': {'errors': [],
'warnings': [('WN01', 'warn desc')]},
'docstring2': {'errors': []}})
exit_status = validate_docstrings.main(func_name=None,
prefix=None,
errors=[],
output_format='default',
ignore_deprecated=False)
assert exit_status == 0
def test_exit_status_for_validate_all_json(self, monkeypatch):
print('EXECUTED')
monkeypatch.setattr(
validate_docstrings, 'validate_all',
lambda prefix, ignore_deprecated=False: {
'docstring1': {'errors': [('ER01', 'err desc'),
('ER02', 'err desc'),
('ER03', 'err desc')]},
'docstring2': {'errors': [('ER04', 'err desc'),
('ER05', 'err desc')]}})
exit_status = validate_docstrings.main(func_name=None,
prefix=None,
errors=[],
output_format='json',
ignore_deprecated=False)
assert exit_status == 0
def test_errors_param_filters_errors(self, monkeypatch):
monkeypatch.setattr(
validate_docstrings, 'validate_all',
lambda prefix, ignore_deprecated=False: {
'Series.foo': {'errors': [('ER01', 'err desc'),
('ER02', 'err desc'),
('ER03', 'err desc')],
'file': 'series.py',
'file_line': 142},
'DataFrame.bar': {'errors': [('ER01', 'err desc'),
('ER02', 'err desc')],
'file': 'frame.py',
'file_line': 598},
'Series.foobar': {'errors': [('ER01', 'err desc')],
'file': 'series.py',
'file_line': 279}})
exit_status = validate_docstrings.main(func_name=None,
prefix=None,
errors=['ER01'],
output_format='default',
ignore_deprecated=False)
assert exit_status == 3
exit_status = validate_docstrings.main(func_name=None,
prefix=None,
errors=['ER03'],
output_format='default',
ignore_deprecated=False)
assert exit_status == 1
|
bsd-3-clause
|
nelango/ViralityAnalysis
|
model/lib/pandas/tests/test_lib.py
|
9
|
10636
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, date, time
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from pandas.compat import u, PY2
class TestMisc(tm.TestCase):
def test_max_len_string_array(self):
arr = a = np.array(['foo', 'b', np.nan], dtype='object')
self.assertEqual(lib.max_len_string_array(arr), 3)
# unicode
arr = a.astype('U').astype(object)
self.assertEqual(lib.max_len_string_array(arr), 3)
# bytes for python3
arr = a.astype('S').astype(object)
self.assertEqual(lib.max_len_string_array(arr), 3)
# raises
tm.assertRaises(TypeError,
lambda: lib.max_len_string_array(arr.astype('U')))
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
self.assertEqual(pd.lib.infer_dtype(arr), compare)
# object array of bytes
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
def test_maybe_indices_to_slice_left_edge(self):
target = np.arange(100)
# slice
indices = np.array([], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
for end in [1, 2, 5, 20, 99]:
for step in [1, 2, 4]:
indices = np.arange(0, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2], [2, 0, -2]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_right_edge(self):
target = np.arange(100)
# slice
for start in [0, 2, 5, 20, 97, 98]:
for step in [1, 2, 4]:
indices = np.arange(start, 99, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
indices = np.array([97, 98, 99, 100], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
with self.assertRaises(IndexError):
target[indices]
with self.assertRaises(IndexError):
target[maybe_slice]
indices = np.array([100, 99, 98, 97], dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
with self.assertRaises(IndexError):
target[indices]
with self.assertRaises(IndexError):
target[maybe_slice]
for case in [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_both_edges(self):
target = np.arange(10)
# slice
for step in [1, 2, 4, 5, 8, 9]:
indices = np.arange(0, 9, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_maybe_indices_to_slice_middle(self):
target = np.arange(100)
# slice
for start, end in [(2, 10), (5, 25), (65, 97)]:
for step in [1, 2, 4, 20]:
indices = np.arange(start, end, step, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# reverse
indices = indices[::-1]
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertTrue(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
# not slice
for case in [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]:
indices = np.array(case, dtype=np.int64)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
self.assertFalse(isinstance(maybe_slice, slice))
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
def test_isinf_scalar(self):
#GH 11352
self.assertTrue(lib.isposinf_scalar(float('inf')))
self.assertTrue(lib.isposinf_scalar(np.inf))
self.assertFalse(lib.isposinf_scalar(-np.inf))
self.assertFalse(lib.isposinf_scalar(1))
self.assertFalse(lib.isposinf_scalar('a'))
self.assertTrue(lib.isneginf_scalar(float('-inf')))
self.assertTrue(lib.isneginf_scalar(-np.inf))
self.assertFalse(lib.isneginf_scalar(np.inf))
self.assertFalse(lib.isneginf_scalar(1))
self.assertFalse(lib.isneginf_scalar('a'))
class Testisscalar(tm.TestCase):
def test_isscalar_builtin_scalars(self):
self.assertTrue(lib.isscalar(None))
self.assertTrue(lib.isscalar(True))
self.assertTrue(lib.isscalar(False))
self.assertTrue(lib.isscalar(0.))
self.assertTrue(lib.isscalar(np.nan))
self.assertTrue(lib.isscalar('foobar'))
self.assertTrue(lib.isscalar(b'foobar'))
self.assertTrue(lib.isscalar(u('efoobar')))
self.assertTrue(lib.isscalar(datetime(2014, 1, 1)))
self.assertTrue(lib.isscalar(date(2014, 1, 1)))
self.assertTrue(lib.isscalar(time(12, 0)))
self.assertTrue(lib.isscalar(timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.NaT))
def test_isscalar_builtin_nonscalars(self):
self.assertFalse(lib.isscalar({}))
self.assertFalse(lib.isscalar([]))
self.assertFalse(lib.isscalar([1]))
self.assertFalse(lib.isscalar(()))
self.assertFalse(lib.isscalar((1,)))
self.assertFalse(lib.isscalar(slice(None)))
self.assertFalse(lib.isscalar(Ellipsis))
def test_isscalar_numpy_array_scalars(self):
self.assertTrue(lib.isscalar(np.int64(1)))
self.assertTrue(lib.isscalar(np.float64(1.)))
self.assertTrue(lib.isscalar(np.int32(1)))
self.assertTrue(lib.isscalar(np.object_('foobar')))
self.assertTrue(lib.isscalar(np.str_('foobar')))
self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1),
np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(np.timedelta64(1, 'h'))]:
self.assertFalse(lib.isscalar(zerodim))
self.assertTrue(lib.isscalar(lib.item_from_zerodim(zerodim)))
def test_isscalar_numpy_arrays(self):
self.assertFalse(lib.isscalar(np.array([])))
self.assertFalse(lib.isscalar(np.array([[]])))
self.assertFalse(lib.isscalar(np.matrix('1; 2')))
def test_isscalar_pandas_scalars(self):
self.assertTrue(lib.isscalar(pd.Timestamp('2014-01-01')))
self.assertTrue(lib.isscalar(pd.Timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.Period('2014-01-01')))
def test_lisscalar_pandas_containers(self):
self.assertFalse(lib.isscalar(pd.Series()))
self.assertFalse(lib.isscalar(pd.Series([1])))
self.assertFalse(lib.isscalar(pd.DataFrame()))
self.assertFalse(lib.isscalar(pd.DataFrame([[1]])))
self.assertFalse(lib.isscalar(pd.Panel()))
self.assertFalse(lib.isscalar(pd.Panel([[[1]]])))
self.assertFalse(lib.isscalar(pd.Index([])))
self.assertFalse(lib.isscalar(pd.Index([1])))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
mit
|
nkurinsky/SurveySim
|
examples/plot_nice_images.py
|
1
|
3517
|
#!/usr/bin/env python
import sys
from astropy.io import fits
from matplotlib import gridspec
import matplotlib.cm as cm
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.figure as fig
import numpy as np
from numpy import std
from scipy import stats
from scipy.stats import norm
from math import ceil
import os
import Tkinter as tk
from matplotlib.colors import Normalize
from SurveySim.OutputFile import *
if(len(sys.argv) < 2):
print "Calling Sequence: "+sys.argv[0]+" outputfile"
quit()
else:
ofile=sys.argv[1]
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
def plotImage(image,interpolation='nearest',labelCbar=True,annotateXY=None,cmax=105):
print image.name
tmpimg=np.flipud(image.img)
cmap=cm.Greys
clim=[0,cmax]
if(image.name == 'Residual'):
cmap=cm.bwr
clim=[-cmax,cmax]
if(image.name == 'Model Diagnostic'):
cmap=cm.Blues
if(image.name=='Observation Diagnostic'):
cmap=cm.Reds
plt.imshow(tmpimg, interpolation=interpolation, cmap=cmap,extent=image.extent, aspect='auto')
if(image.name == 'Residual'):
gpts=(tmpimg > 0)
print 'St dev of residual: ',np.std(tmpimg[gpts])
std_str=np.str(np.std(tmpimg[gpts]))
if(annotateXY != None):
plt.annotate('st.dev='+std_str[:4],xy=(annotateXY[0]+.15,annotateXY[1]-0.2),fontsize=15)
plt.clim(clim)
cbar=plt.colorbar()
if(labelCbar):
cbar.set_label("Normalized Density")
def plotImages(obs,xrange=None,yrange=None,axis1_name=None,axis2_name=None,annotateXY=None,cmax=105):
col=0
for key in obs.images.keys():
if(key == "Observation Diagnostic"):
col=1
lab='Observed'
if(key == "Residual"):
col=3
lab='Residual'
if(key == "Model Diagnostic"):
col=2
lab='Model'
ax=plt.subplot(1,3,col)
if(key == 'Residual'):
obs.norm = MidpointNormalize(midpoint=0)
if(key == 'Observation Diagnostic'):
obs.norm = MidpointNormalize(midpoint=-150)
plotImage(obs.images[key],labelCbar=False,annotateXY=annotateXY,cmax=cmax)
if(col > 1):
plt.ylabel("")
if(xrange != None):
plt.xlim(xrange[0],xrange[1])
if(yrange != None):
plt.ylim(yrange[0],yrange[1])
plt.xlabel(axis1_name)
plt.ylabel(axis2_name)
if(annotateXY != None):
ax.annotate(lab,xy=annotateXY,fontsize=20)
else:
plt.title(lab,fontsize=20)
plt.tight_layout(True)
def showImages(obs,block=True,xrange=None,yrange=None,axis1_name=None,axis2_name=None,annotateXY=None,cmax=105):
plt.figure(figsize=(12,4))
plotImages(obs,xrange=xrange,yrange=yrange,axis1_name=axis1_name,axis2_name=axis2_name,annotateXY=annotateXY,cmax=cmax)
plt.show(block=block)
output=OutputFile(ofile)
axis1_name=r'log($S_{24}$/mJy)'
axis2_name=r'log($S_{250}/S_{24}$)'
showImages(output,xrange=[-1.3,1.0],yrange=[0.5,3],axis1_name=axis1_name,axis2_name=axis2_name,annotateXY=(-0.7,3*0.85))
|
mit
|
jseabold/scikit-learn
|
sklearn/utils/testing.py
|
14
|
27118
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import struct
import scipy as sp
import scipy.io
from functools import wraps
from operator import itemgetter
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
from sklearn.cluster import DBSCAN
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal",
"assert_approx_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
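# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical usage of assert_warns: the inner function triggers a
# DeprecationWarning, and assert_warns checks that the warning is raised while
# passing the wrapped function's return value through unchanged.
def _demo_assert_warns():
    def _deprecated_add(a, b):
        warnings.warn("use operator.add instead", DeprecationWarning)
        return a + b
    result = assert_warns(DeprecationWarning, _deprecated_add, 1, 2)
    assert result == 3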
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
    Note: using this (in both variants) will clear all warnings
    from all Python modules loaded. If you need to test
    cross-module warning logging, this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
    exceptions : exception or tuple of exception
        The expected exception class, or a tuple of acceptable classes.
    message : str
        A substring that must appear in the raised exception's message.
    function : callable
        Callable object expected to raise the error.
    *args : the positional arguments to `function`.
    **kwargs : the keyword arguments to `function`.
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
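# --- Illustrative sketch (not part of the original module) ---
# A hypothetical usage of assert_raise_message: it passes only when the
# callable raises one of the expected exception types and the given substring
# appears in the exception message.
def _demo_assert_raise_message():
    def _fail(x):
        raise ValueError("x must be positive, got %r" % x)
    assert_raise_message(ValueError, "must be positive", _fail, -1)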
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
    'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that cannot be default-constructed sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
"SelectFromModel"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion, GridSearchCV, RandomizedSearchCV and
        SelectFromModel.
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if (".tests." in modname):
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
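# --- Illustrative sketch (not part of the original module) ---
# A hypothetical usage of all_estimators: list the names of all classifier
# classes discovered in the installed sklearn package. The exact content of
# the list depends on the sklearn version.
def _demo_all_estimators():
    classifiers = all_estimators(type_filter='classifier')
    # each entry is a (name, class) tuple, sorted by name
    return [name for name, cls in classifiers]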
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
    Classes for which random_state is deprecated are ignored. Currently DBSCAN
is one such class.
"""
if isinstance(estimator, DBSCAN):
return
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def skip_if_32bit(func):
"""Test decorator that skips tests on 32bit platforms."""
@wraps(func)
def run_test(*args, **kwargs):
bits = 8 * struct.calcsize("P")
if bits == 32:
raise SkipTest('Test skipped on 32bit platforms.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with
some implementation of BLAS (or other libraries that manage an internal
posix thread pool) can cause a crash or a freeze of the Python process.
In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so this problem seems
    to only impact OSX users.
    This wrapper makes it possible to skip tests that can possibly cause
    this crash under OS X.
Under Python 3.4+ it is possible to use the `forkserver` start method
for multiprocessing to avoid this issue. However it can cause pickling
    errors on interactively defined functions. It is therefore not enabled by
default.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin':
raise SkipTest(
"Possible multi-process bug with some BLAS")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independance)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
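# --- Illustrative sketch (not part of the original module) ---
# A hypothetical usage of TempMemmap: inside the `with` block the data is
# re-loaded as a read-only joblib memmap backed by a temporary folder, which
# is deleted again on exit.
def _demo_temp_memmap():
    data = np.arange(12, dtype=np.float64).reshape(3, 4)
    with TempMemmap(data, mmap_mode='r') as X_readonly:
        assert_array_equal(X_readonly, data)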
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
|
bsd-3-clause
|
TianpeiLuke/GParML
|
kernels.py
|
2
|
6105
|
###############################################################################
# kernels.py
# Implement code to generate kernel matrices. So far, we only need the ARD
# squared exponential kernel. Technically, you could use any package to do this
# like GPy, but we decided to write our own function for some reason.
###############################################################################
import numpy as np
from scipy.spatial import distance
class ArdHypers(object):
'''
ArdHypers
Container class for the hyperparameters of the ARD squared exponential
kernel.
'''
def __init__(self, D, sf=1.0, ll=1.0, ard=None):
self.D = D
self.sf = sf
        if ard is None:
self.ard = np.ones(D) * ll
else:
self.ard = np.atleast_1d(np.array(ard).squeeze())
assert self.ard.ndim == 1
@property
def ll(self):
if (np.all(self.ard == self.ard[0])):
return self.ard[0]
else:
raise ValueError("RBF kernel is not isotropic")
@ll.setter
def ll(self, value):
self.ard = np.ones(self.D) * value
###############################################################################
# TODO: Use ArdHypers class in rbf.
# Update view code.
###############################################################################
class rbf:
def __init__(self, D, ll=1.0, sf=1.0, ard=None):
'''
__init__
Constructor for the rbf kernel.
Args:
ll : Length scale parameter, if no ARD coefficients are used.
sf : Marginal variance of the GP.
ard: D element numpy array of length scales for each dimension.
'''
assert sf > 0.0
self.D = D
self.sf = sf
if ard is None:
self.ard = np.ones(D) * ll
else:
self.ard = np.array(ard)
@property
def ll(self):
if (np.all(self.ard == self.ard[0])):
return self.ard[0]
else:
raise ValueError("RBF kernel is not isotropic.")
@ll.setter
def ll(self, value):
self.ard = np.ones(self.D) * value
def K(self, X, X2=None):
"""
rbf
Implements the ARD RBF kernel.
Args:
X : NxD numpy array of input points. N is the number of points, D
their dimension. I.e. each data point is a ROW VECTOR (same
convention as in GPML).
X2 : Optional N2xD numpy array of input points. If it is given, the
cross-covariances are calculated.
Returns:
K_X1X2 or K_XX covariance matrix (NxN2 or NxN respectively).
"""
# Assume we want to calculate the self-covariance if no second dataset is
# given.
        if X2 is None:
X2 = X
# Ensure that we can accept 1D arrays as well, where we assume the input
# is 1 dimensional.
if (X.ndim == 1):
X = np.atleast_2d(X)
X2 = np.atleast_2d(X2)
# Get data shapes & dimensions etc.
N = X.shape[0]
D = X.shape[1]
N2 = X2.shape[0]
assert D == self.D
# Dimensions must be equal. Assert for debug purposes.
assert X.shape[1] == X2.shape[1]
# Actually calculate the covariance matrix
K = distance.cdist(X, X2, 'seuclidean', V=np.atleast_1d(2*self.ard**2))
assert K.shape == (N, N2)
K = self.sf**2 * np.exp(-K**2)
return K
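###############################################################################
# Illustrative sketch (not part of the original file): a minimal usage example
# of the ARD RBF kernel above. Data points are row vectors, so an (N, D) input
# yields an (N, N) covariance matrix whose diagonal equals sf**2.
###############################################################################
def _demo_rbf_usage():
    X = np.random.uniform(-1.0, 1.0, (5, 2))   # 5 points in 2 dimensions
    kern = rbf(D=2, sf=1.5, ard=[1.0, 0.5])    # anisotropic length scales
    K = kern.K(X)                               # (5, 5) self-covariance
    assert K.shape == (5, 5)
    assert np.allclose(np.diag(K), kern.sf ** 2)
    return K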
###############################################################################
# If run as main, run either unit tests or just some visualisations.
###############################################################################
if __name__ == '__main__':
import unittest
import argparse
import matplotlib.pyplot as plt
import numpy.random as rnd
import numpy.linalg as linalg
# Parse the arguments...
parser = argparse.ArgumentParser(description='Housekeeping for kernels.py.')
parser.add_argument('-v', '--view', help="View a covariance matrix.", action="store_true")
parser.add_argument('-t', '--test', help="Run the unit tests.", action="store_true")
args = parser.parse_args()
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
pass
def test_1(self):
X = np.atleast_2d(rnd.uniform(-5.0, 5.0, 10*3)).T
kern = rbf(1, 2.0, 4.0)
K = kern.K(X)
a = 3
b = 5
self.assertEqual(K[a, b], 16.0 * np.exp(-(X[a] - X[b])**2/4.0))
def test_2(self):
kern1 = rbf(3, ard=np.array([1.0, 1.0, float('inf')]))
kern2 = rbf(2, ard=np.array([1.0, 1.0]))
X1 = np.reshape(rnd.uniform(-5.0, 5.0, 10*3), (10, 3))
X2 = np.reshape(rnd.uniform(-5.0, 5.0, 5*3), (5, 3))
#Ka = rbf(X1, X2, ard=np.array([1.0, 1.0, float('inf')]))
#Kb = rbf(X1[:, 0:2], X2[:, 0:2], ard=np.array([1.0, 1.0]))
Ka = kern1.K(X1, X2)
Kb = kern2.K(X1[:, 0:2], X2[:, 0:2])
self.assertTrue(Ka.shape == (10, 5))
self.assertTrue((Ka == Kb).all())
def test_hypstruct(self):
# Just needs to get through this with no errors
m = ArdHypers(3, 3.0, ard=[1.0, 4.0, 3.0])
m.ll = 3
if args.view:
X = rnd.uniform(-2.0, 2.0, 200)
        kern = rbf(1)
        K = kern.K(np.atleast_2d(X).T)
        Xs = np.sort(X)
        Ks = kern.K(np.atleast_2d(Xs).T)
fig = plt.figure(1)
plt.clf()
cax = plt.imshow(K, interpolation='none')
fig.colorbar(cax)
plt.draw()
fig = plt.figure(2)
plt.clf()
cax = plt.imshow(Ks, interpolation='none')
fig.colorbar(cax)
plt.draw()
# Draw a GP with the covariance matrix
y = rnd.multivariate_normal(np.zeros(200), K)
plt.figure(3)
plt.plot(X, y, 'x')
plt.show()
if args.test:
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
|
bsd-3-clause
|
zaxtax/scikit-learn
|
benchmarks/bench_plot_randomized_svd.py
|
38
|
17557
|
"""
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is always equal to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that the Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest running these
benchmarks with `enable_spectral_norm = False`, as the Frobenius norm is MUCH
faster to compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file_name):
with open(file_name, 'rb') as fo:
return pickle.load(fo, encoding='latin1')["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == 'lfw_people':
X = fetch_lfw_people().data
elif dataset_name == '20newsgroups':
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == 'olivetti_faces':
X = fetch_olivetti_faces().data
elif dataset_name == 'rcv1':
X = fetch_rcv1().data
elif dataset_name == 'CIFAR':
        if handle_missing_dataset(CIFAR_FOLDER) == 0:
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == 'SVHN':
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == 'low rank matrix':
X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
effective_rank=100, tail_strength=.5,
random_state=random_state)
elif dataset_name == 'uncorrelated matrix':
X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
random_state=random_state)
elif dataset_name == 'big sparse matrix':
sparsity = np.int(1e6)
size = np.int(1e6)
small_size = np.int(1e4)
data = np.random.normal(0, 1, np.int(sparsity/10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_mldata(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ['g', 'b', 'y']
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker='^', c='red')
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20),
textcoords='offset points', ha='right', va='bottom')
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -80),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
else:
plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, 30),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker='o')
plt.legend(loc="lower right", prop={'size': 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
power_iteration_normalizer='auto', method=None):
"""
Measure time for decomposition
"""
print("... running SVD ...")
    if method != 'fbpca':
gc.collect()
t0 = time()
U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
power_iteration_normalizer,
random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
l=n_oversamples+n_comps)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
"""
    Compute a matrix norm of A, used to measure the discrepancy between the
    original matrix and its reconstruction from a randomized SVD.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm='fro')
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = .0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
        # accumulate squared block norms: ||A||_F**2 == sum_b ||A_b||_F**2
        cum_norm += norm_diff(M, norm='fro', msg=False) ** 2
return np.sqrt(cum_norm)
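# --- Illustrative sketch (not part of the original benchmark) ---
# The batched computation above relies on the identity
#   ||A||_F**2 == sum over disjoint row blocks b of ||A[b, :]||_F**2,
# which is why squared block norms are accumulated before the final square
# root. The toy check below is hypothetical and not used by the benchmarks.
def _demo_blockwise_frobenius():
    A = np.random.RandomState(0).randn(2500, 8)
    total = sum(norm_diff(A[b, :], norm='fro', msg=False) ** 2
                for b in gen_batches(A.shape[0], 1000))
    assert np.isclose(np.sqrt(total), norm_diff(A, norm='fro', msg=False))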
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method='fbpca')
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {'n_samples': n_samples, 'n_features': n_features,
'tail_strength': .7, 'random_state': random_state}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for n_comp in [np.int(rank/2), rank, rank*2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
power_iteration_normalizer='LU')
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
plot_power_iter_vs_s(power_iter, all_spectral, title)
title = "%s: frobenius norm diff vs n power iteration" % (dataset_name)
plot_power_iter_vs_s(power_iter, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
(dataset_name, X.shape[0], X.shape[1]))
bench_a(X, dataset_name, power_iter, n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)))
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
|
bsd-3-clause
|
pypot/scikit-learn
|
sklearn/neighbors/tests/test_kde.py
|
208
|
5556
|
import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
|
bsd-3-clause
|
xujun10110/king-phisher
|
docs/source/conf.py
|
2
|
9503
|
# -*- coding: utf-8 -*-
#
# King Phisher documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 13 09:54:27 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
GITHUB_BRANCH = 'dev'
GITHUB_REPO = 'securestate/king-phisher'
import sys
import os
_prj_root = os.path.dirname(__file__)
_prj_root = os.path.relpath(os.path.join('..', '..'), _prj_root)
_prj_root = os.path.abspath(_prj_root)
sys.path.insert(1, _prj_root)
_pkg = os.path.join(_prj_root, 'king_phisher', 'third_party')
sys.path.insert(2, _pkg)
del _prj_root, _pkg
sys.modules['ipaddress'] = type('ipaddress', (), {})
import king_phisher.version
import king_phisher.utilities
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration ------------------------------------------------
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'king_phisher.rpc_docs',
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinxcontrib.httpdomain'
]
extlinks = {
'release': ("https://github.com/{0}/releases/tag/v%s".format(GITHUB_REPO), 'v')
}
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
file_name = info['module'].replace('.', '/') + '.py'
return "https://github.com/{0}/blob/{1}/{2}".format(GITHUB_REPO, GITHUB_BRANCH, file_name)
intersphinx_mapping = {
'glib': ('http://lazka.github.io/pgi-docs/GLib-2.0/', None),
'gobject': ('http://lazka.github.io/pgi-docs/GObject-2.0/', None),
'gtk': ('http://lazka.github.io/pgi-docs/Gtk-3.0/', None),
'smokezephyr': ('https://smoke-zephyr.readthedocs.org/en/latest/', None),
'webkit2': ('http://lazka.github.io/pgi-docs/WebKit2-4.0/', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'King Phisher'
copyright = '2013-2015, SecureState LLC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = king_phisher.version.version.split('-')[0]
# The full version, including alpha/beta/rc tags.
release = king_phisher.version.distutils_version
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# HTTP domain specifc settings http://pythonhosted.org/sphinxcontrib-httpdomain/#additional-configuration
http_index_shortname = 'rest-api'
http_index_localname = "{0} REST API".format(project)
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'king_phisher_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'KingPhisher.tex', u'King Phisher Documentation', u'Spencer McIntyre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'kingphisher', u'King Phisher Documentation', [u'Spencer McIntyre'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'KingPhisher', u'King Phisher Documentation', u'Spencer McIntyre', 'KingPhisher', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# mock specific external packages
MOCK_MODULES = [
'gi',
'gi.repository',
'ipaddress',
'matplotlib',
'matplotlib.backends',
'matplotlib.backends.backend_gtk3',
'matplotlib.backends.backend_gtk3agg',
'matplotlib.figure'
]
sys.modules.update((mod_name, king_phisher.utilities.Mock()) for mod_name in MOCK_MODULES)
|
bsd-3-clause
|
nelson-liu/scikit-learn
|
sklearn/utils/tests/test_random.py
|
85
|
7349
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
|
bsd-3-clause
|
bmassman/fake_news
|
fake_news/pipeline/db_transformer.py
|
1
|
9097
|
#!/usr/bin/env python3
"""
Script to transform the dataset in preparation for modeling.
"""
import os
import csv
import re
from typing import Sequence, Dict, Set, List
from urllib.parse import urlparse
from collections import defaultdict
from itertools import count
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix, coo_matrix, hstack
from sklearn.feature_extraction.text import TfidfVectorizer
import language_check
from .sentiment.sentiment import get_affect_set, get_sentiment
def multi_hot_encode(x: Sequence[str],
                     prefix: str) -> (coo_matrix, Dict[int, str]):
    """
    Return sparse matrix encoding the categorical variables in x and a
    dictionary mapping column numbers to the prefixed category names.
    Each record in x must be a single string with the categorical variables
    separated by a comma. The prefix is prepended to each categorical
    variable name to prevent collisions between feature groups.
"""
data = []
i = []
j = []
col = count()
dummy_col = defaultdict(lambda: next(col))
for row, cat_vars in enumerate(x):
for cat_var in cat_vars.split(','):
prepended = f'{prefix}_{cat_var}'
data.append(1)
i.append(row)
j.append(dummy_col[prepended])
return coo_matrix((data, (i, j))), {v: k for k, v in dummy_col.items()}
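# A minimal usage sketch for multi_hot_encode (illustrative; the toy inputs
# below are made up and not part of the pipeline):
#
#     >>> enc, col_map = multi_hot_encode(['a,b', 'b'], 'tag')
#     >>> enc.toarray()
#     array([[1, 1],
#            [0, 1]])
#     >>> col_map
#     {0: 'tag_a', 1: 'tag_b'}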
def tfidf_text(x: Sequence[str],
prefix: str,
ngram: int = 1,
stop_words: bool = False) -> (coo_matrix, Dict[int, str]):
"""
    Return a sparse TF-IDF encoding of x and a dictionary mapping each
    column number to its prefixed token.
"""
stop_words = 'english' if stop_words else None
tfidf = TfidfVectorizer(ngram_range=(1, ngram), stop_words=stop_words)
text = tfidf.fit_transform(x)
token_list = tfidf.get_feature_names()
text_map = {col: f'{prefix}_{token}'
for col, token in enumerate(token_list)}
return text, text_map
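# A minimal usage sketch for tfidf_text (illustrative; the toy corpus is made
# up):
#
#     >>> mat, col_map = tfidf_text(['the cat sat', 'the dog'], 'text')
#     >>> mat.shape[0]
#     2
#     >>> sorted(col_map.values())
#     ['text_cat', 'text_dog', 'text_sat', 'text_the']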
def combine(category_maps: Sequence[Dict[int, str]]) -> Dict[int, str]:
"""Return combined dictionary for mapping categories to column number."""
combined = category_maps[0]
for category_map in category_maps[1:]:
offset = len(combined)
offset_map = {col + offset: cat for col, cat in category_map.items()}
combined.update(offset_map)
return combined
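# A minimal usage sketch for combine (illustrative): column maps are merged
# left to right, shifting the keys of each subsequent map by the number of
# columns already combined:
#
#     >>> combine([{0: 'auth_x', 1: 'auth_y'}, {0: 'tag_a'}])
#     {0: 'auth_x', 1: 'auth_y', 2: 'tag_a'}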
def label_urls(netloc: pd.Series) -> pd.Series:
"""
Returns Series corresponding to article labels.
(1 is fake, 0 is true).
"""
url_labels = defaultdict(lambda: float('nan'))
labels_file = os.path.join('fake_news', 'pipeline', 'url_labels.csv')
with open(labels_file, 'r') as f:
reader = csv.reader(f)
for domain, label in reader:
label = float(label) if label else float('nan')
url_labels[domain] = label
return netloc.apply(lambda u: url_labels[u])
def get_netloc(urls: pd.Series) -> pd.Series:
"""Return series of netlocs from article urls."""
return urls.apply(lambda u: urlparse(u).netloc)
def get_domain_ending(url: str) -> str:
"""Return ending of domain."""
netloc = urlparse(url).netloc
match = re.search(r'\.(.+?)$', netloc)
return match.group(1)
def get_source_count(netlocs: pd.Series) -> coo_matrix:
"""
Return coo_matrix corresponding to the count of articles in database from
each article's publisher.
"""
source_counts = netlocs.groupby(netlocs).transform('count')
return coo_matrix(source_counts).T
def count_misspellings(text: str, dictionary: Set[str]) -> float:
"""Return proportion of misspellings in each article's text."""
words = re.sub(r'[^A-Za-z]', ' ', text).split()
misspellings = 0
word_count = len(words)
if word_count == 0:
return 0.0
for word in words:
if word[0].isupper():
continue
if word.lower() not in dictionary:
misspellings += 1
return misspellings / len(words)
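# A worked sketch of count_misspellings (illustrative; the tiny dictionary is
# made up). Capitalised words are skipped as likely proper nouns:
#
#     >>> count_misspellings('The catt sat', {'the', 'cat', 'sat'})
#     0.3333333333333333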
def get_misspellings(text: pd.Series) -> pd.Series:
"""Return Series of misspelling counts in text."""
dict_file = os.path.join('fake_news', 'pipeline', 'Dictionary_690.csv')
with open(dict_file, 'r') as f:
words = f.readlines()
words = map(lambda x: x.strip(), words)
dictionary = {word for word in words}
return text.apply(lambda x: count_misspellings(x, dictionary))
def get_grammar_mistakes(text: pd.Series) -> pd.Series:
"""Return Series of grammar mistake counts in text."""
tool = language_check.LanguageTool('en-US')
def grammar_check(s: str) -> float:
"""Return count of grammar mistakes normalized by word count."""
wc = len(s.split())
if wc == 0:
return 0.0
return len(tool.check(s)) / wc
return text.apply(grammar_check)
def get_lshash(text: coo_matrix) -> List[str]:
"""
Return list of cosine LSHs encoding text.
"""
def cosine_LSH(vector, planes):
"""
Return a single cosine LSH for a particular record and given planes.
"""
sig = 0
for plane in planes:
sig <<= 1
if vector.dot(plane) >= 0:
sig |= 1
return str(sig)
bits = 512
random_projections = np.random.randn(bits, text.shape[1])
hashes = [cosine_LSH(text.getrow(idx), random_projections)
for idx in range(text.shape[0])]
return hashes
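# How the cosine LSH above works, on a toy example (illustrative; real use
# draws 512 random hyperplanes): each plane contributes one bit, set to 1 when
# the document vector lies on its non-negative side. With three planes and dot
# products [+0.3, -0.1, +2.0] the signature is built up as 0b101, i.e. '5'.
# Articles with similar TF-IDF vectors tend to share signatures, which is what
# multi_hot_encode(get_lshash(...), 'lsh') exploits downstream.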
def build_sentiments(text: pd.Series) -> coo_matrix:
"""Return coo_matrix representing sentiment for all articles."""
affect_set = get_affect_set()
i = []
j = []
scores = []
for row, article_text in text.iteritems():
i.extend([row] * 10)
j.extend(list(range(10)))
scores.extend(get_sentiment(article_text, affect_set))
return coo_matrix((scores, (i, j)))
def transform_data(articles: pd.DataFrame,
ground_truth: pd.DataFrame, *,
tfidf: bool,
author: bool,
tags: bool,
title: bool,
ngram: int,
domain_endings: bool,
word_count: bool,
misspellings: bool,
grammar_mistakes: bool,
lshash: bool,
source_count: bool,
sentiment: bool,
stop_words: bool) -> (csr_matrix, csr_matrix,
Dict[str, int],
pd.Series, pd.Series):
"""
Return sparse matrix of features for modeling and dict mapping categories
to column numbers.
"""
articles['netloc'] = get_netloc(articles['url'])
ground_truth['netloc'] = get_netloc(ground_truth['url'])
articles['labels'] = label_urls(articles['netloc'])
ground_truth['labels'] = ground_truth['labels'].apply(pd.to_numeric)
articles.dropna(subset=['labels'], inplace=True)
articles_end = len(articles.index)
articles = articles.append(ground_truth, ignore_index=True)
res = []
if author:
res.append(multi_hot_encode(articles['authors'], 'auth'))
if tags:
res.append(multi_hot_encode(articles['tags'], 'tag'))
if tfidf:
tfidfed_text, tfidf_dict = tfidf_text(articles['text'], 'text', ngram,
stop_words)
res.append((tfidfed_text, tfidf_dict))
if title:
res.append(tfidf_text(articles['title'], 'title', ngram, stop_words))
if domain_endings:
articles['domain_ending'] = articles['url'].apply(get_domain_ending)
res.append(multi_hot_encode(articles['domain_ending'], 'domain'))
if word_count:
res.append((coo_matrix(articles['word_count']).T, {0: 'word_count'}))
if misspellings:
articles['misspellings'] = get_misspellings(articles['text'])
res.append((coo_matrix(articles['misspellings']).T,
{0: 'misspellings'}))
if grammar_mistakes:
articles['grammar_mistakes'] = get_grammar_mistakes(articles['text'])
res.append((coo_matrix(articles['grammar_mistakes']).T,
{0: 'grammar_mistakes'}))
if lshash:
if not tfidf:
tfidfed_text, _ = tfidf_text(articles['text'], 'text', ngram)
res.append(multi_hot_encode(get_lshash(tfidfed_text), 'lsh'))
if source_count:
res.append((get_source_count(articles['netloc']), {0: 'source_count'}))
if sentiment:
res.append((build_sentiments(articles['text']),
{0: 'sent_trust', 1: 'sent_fear', 2: 'sent_negative',
3: 'sent_sadness', 4: 'sent_anger', 5: 'sent_surprise',
6: 'sent_positive', 7: 'sent_disgust', 8: 'sent_joy',
9: 'sent_anticipation'}))
features = hstack([r[0] for r in res]).tocsr()
category_map = combine([r[1] for r in res])
articles.drop(articles.index[articles_end:], inplace=True)
return (features[:articles_end, :], features[articles_end:, :],
category_map, articles['labels'], ground_truth['labels'])
|
mit
|
yunfeilu/scikit-learn
|
benchmarks/bench_covertype.py
|
120
|
7381
|
"""
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low-dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being roughly an order
of magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
|
bsd-3-clause
|
mozman/ezdxf
|
src/ezdxf/addons/drawing/matplotlib_hatch.py
|
1
|
4111
|
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
# Predefined matplotlib pattern:
# / - diagonal hatching
# \ - back diagonal
# | - vertical
# - - horizontal
# + - crossed
# x - crossed diagonal
# o - small circle
# O - large circle
# . - dots
# * - stars
# 1x sparse
# 2x normal
# 3x dense
HATCH_NAME_MAPPING = {
'ACAD_ISO02W100': '---',
'ACAD_ISO03W100': '---',
'ACAD_ISO04W100': '---',
'ACAD_ISO05W100': '---',
'ACAD_ISO06W100': '---',
'ACAD_ISO07W100': '---',
'ACAD_ISO08W100': '---',
'ACAD_ISO09W100': '---',
'ACAD_ISO10W100': '---',
'ACAD_ISO11W100': '---',
'ACAD_ISO12W100': '---',
'ACAD_ISO13W100': '---',
'ACAD_ISO14W100': '---',
'ACAD_ISO15W100': '---',
'ANCHORLOCK': '++',
'ANGLE': '+++',
'ANSI31': '///',
'ANSI32': '//',
'ANSI33': '///',
'ANSI34': '//',
'ANSI35': '///',
'ANSI36': '///',
'ANSI37': 'xxx',
'ANSI38': 'xxx',
'AR-RROOF': '---',
'AR-SAND': '...',
'ASPHALT': '---...',
'BOARD': '---...',
'BRASS': '---...',
'BOX': '+++',
'BRICK': '+++',
'BRICK_FLBOND': '+++',
'BRICK_INSULATING': '///...',
'BRICK_LWEIGHT': '///...',
'BRICK_PAIRS': '++',
'BRICK_STBOND': '++',
'BRICK_STRBOND': '+',
'BRSTONE': '+++',
'BUTTERFLY': 'xxx|||',
'CHECKER': '+++',
'CLAY': '...---',
'CONCRETE1': 'oo',
'CONCRETE2': 'ooo',
'CONCRETE3': 'oooo',
'CONC_DEMOLITION': 'xxxx',
'CONC_EXISTING': 'xxxx',
'CONC_PRECAST': 'xxxx',
'CORK': '\\\\\\---',
'CROSS': '++++',
'CROSSES': 'xxxx',
'DASH': '---',
'DIAMONDS': 'xxx',
'DOLMIT': '//---',
'DOTGRID': '..',
'DOTS': '...',
'DOTS1': '...',
'DOTS2': '...',
'EARTH': '+++',
'EARTH1': '++++',
'EARTH2': 'xxxx',
'EGYPTIAN': '++++',
'ESCHER': '//\\\\--',
'FLEX': '---',
'FLEXIBLE': '---',
'GLASS': '...',
'GOST_GLASS': '...',
'GOST_GROUND': '///',
'GOST_WOOD': '|||',
'GRASS': '.',
'GRASS1': '..',
'GRASS2': '..',
'GRATE': '+++++',
'GRAVEL': '..',
'GRAVEL1': 'ooo',
'GRID': '++',
'GROUT': '...',
'HERRING_45': '+',
'HERRING_H': 'xx--',
'HERRING_UNI': '++',
'HERRING_V': 'xx',
'HEX': 'xx',
'HEXAGONS': 'xx',
'HONEY': 'xxx',
'HONEYCOMB': 'xxx',
'HOUND': '+++++',
'INSUL': '---',
'INSULATION': 'xxxxx',
'ISO02W100': '---',
'ISO03W100': '---',
'ISO04W100': '---',
'ISO05W100': '---',
'ISO06W100': '---',
'ISO07W100': '---',
'ISO08W100': '---',
'ISO09W100': '---',
'ISO10W100': '---',
'ISO11W100': '---',
'ISO12W100': '---',
'ISO13W100': '---',
'ISO14W100': '---',
'ISO15W100': '---',
'JIS_LC_20': '//',
'JIS_LC_20A': '//',
'JIS_LC_8': '///',
'JIS_LC_8A': '///',
'JIS_RC_10': '///',
'JIS_RC_15': '///',
'JIS_RC_18': '//',
'JIS_RC_30': '//',
'JIS_STN_1E': '///',
'JIS_STN_2.5': '///',
'JIS_WOOD': '///',
'LINE': '---',
'LINES': '---',
'MUDST': '---...',
'NATURAL': '///...',
'NET': '+++++',
'NET3': 'xxxxx-----',
'OCTAGONS': '+++',
'PLAST': '---',
'PLASTI': '---',
'PLUSSES': '..',
'ROCK': '---///',
'SACNCR': '////',
'SAND': 'xxxx',
'SCREED': '....',
'SHAKES': '+++',
'SPANISH': '+++',
'SQUARE': '++++',
'SQUARES': '++++',
'STARS': '**',
'STEEL': '///',
'SWAMP': '...',
'TILEPAT1': '+++',
'TRANS': '---',
'TRIANG': 'xxx',
'TRIANGLES': '****',
'TRIHEX': 'xx',
'V_BATTEN_FLOOR': '--',
'V_MASONRY200x100': '+++',
'V_MASONRY200x60': '++++',
'V_MASONRY200x75': '++++',
'V_MASONRY220x80': '++++',
'V_MASONRY300x100': '++++',
'V_MASONRY300x150': '+++',
'V_MASONRY300x200': '+++',
'V_MASONRY300x75': '++++',
'V_MASONRY400x100': '+++',
'V_MASONRY400x200': '+++',
'V_PARQUET': '---',
'V_STANDING_SEAM': '|||',
'V_ZINC': '|||',
'WAFFLE': '+++',
'WATER': '---',
'WOOD1': '///',
'WOOD2': '\\\\\\',
'WOOD3': '---',
'WOOD4': '----',
'ZIGZAG': '///'
}
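# A minimal lookup sketch (illustrative; using the pattern with a matplotlib
# patch is an assumption about typical usage, not something this module does):
#
# from matplotlib.patches import Rectangle
# hatch = HATCH_NAME_MAPPING.get('ANSI31', '///')  # DXF pattern name -> '///'
# rect = Rectangle((0, 0), 1, 1, fill=False, hatch=hatch)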
|
mit
|
0asa/scikit-learn
|
examples/linear_model/plot_lasso_lars.py
|
363
|
1080
|
#!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
thilbern/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
12
|
16872
|
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings, assert_warns_message
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
"""
    The principle of LARS is to keep covariances tied and decreasing
"""
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
"""
The same, with precomputed Gram matrix
"""
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
"""
Test that lars_path with precomputed Gram and Xy gives the right answer
"""
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
"""
Test that Lars gives least square solution at the end
of the path
"""
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
"""
Test that Lars Lasso gives least square solution at the end
of the path
"""
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
"""Check that lars_path is robust to collinearity in input"""
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
"""
Test that the ``return_path=False`` option returns the correct output
"""
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
"""
Test that the ``return_path=False`` option with Gram remains correct
"""
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
"""
Test that the ``return_path=False`` option with Gram and Xy remains correct
"""
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
"""
Test that LassoLars and Lasso using coordinate descent give the
same results.
"""
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
"""
Test that LassoLars and Lasso using coordinate descent give the
same results when early stopping is used.
(test : before, in the middle, and in the last part of the path)
"""
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
# Check that the deprecated return_models=True yields the same coefs path
with ignore_warnings():
lasso_coef = np.zeros((w.shape[0], len(lars_alphas)))
iter_models = enumerate(linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
return_models=True,
fit_intercept=False))
for i, model in iter_models:
lasso_coef[:, i] = model.coef_
np.testing.assert_array_almost_equal(lars_coef, lasso_coef, decimal=1)
np.testing.assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
np.testing.assert_array_almost_equal(lasso_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
    # Note: it used to be the case that Lars had to use the drop-for-good
    # strategy for this, but this is no longer the case with the
    # equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
"""
assure that at least some features get added if necessary
test for 6d2b4c
"""
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
"""
Assure that estimators receiving multidimensional y do the right thing
"""
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
""" Test the LassoLarsCV object by checking that the optimal alpha
increases as the number of samples increases.
    This property is not actually guaranteed in general and is just a
    property of the given dataset, with the given steps chosen.
"""
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
""" Test the LassoLarsIC object by checking that
- some good features are selected.
- alpha_bic > alpha_aic
- n_nonzero_bic < n_nonzero_aic
"""
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
"""LassoLarsIC should not warn for log of zero MSE."""
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
jgliss/pyplis
|
pyplis/dataset.py
|
1
|
46074
|
# -*- coding: utf-8 -*-
#
# Pyplis is a Python library for the analysis of UV SO2 camera data
# Copyright (C) 2017 Jonas Gliss ([email protected])
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License a
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Assorted data import functionality.
The :class:`Dataset` object handles the general data import setup, for
instance the automated separation of image
files by their type (e.g. on-band, off-band, dark, offset) using
information from a file naming convention specified within a
:class:`Camera` object. For more information how to customise your data
import see :mod:`pyplis.setupclasses` or read `this little introductory
tutorial
<http://pyplis.readthedocs.io/en/latest/tutorials.html#primer-on-data-import>`_
"""
from __future__ import (absolute_import, division)
from os.path import exists, join, isfile, isdir
from os import listdir, walk
from datetime import datetime, date
from numpy import inf
from matplotlib.pyplot import subplots, FuncFormatter, tight_layout, Line2D
from matplotlib.patches import Rectangle
from copy import deepcopy
from traceback import format_exc
from collections import OrderedDict as od
from pyplis import logger, print_log
from .imagelists import ImgList, DarkImgList
from .image import Img
from .helpers import shifted_color_map
from .setupclasses import MeasSetup, Source, Camera
from .exceptions import ImgMetaError
import six
class Dataset(object):
"""Class for data import management.
Default input is a :class:`pyplis.setupclasses.MeasSetup` object, which
specifies the camera used (e.g. file naming convention, detector specifics)
the measurement geometry and information about the source and
meteorological wind direction, start / stop time stamps and the image
base directory.
Attributes
----------
setup : MeasSetup
class containing measurement setup information
lst_type : object
default type of ImgList objects (cf. :class:`CellCalibEngine`)
lists_access_info : OrderedDict
dictionary filled on data import based on camera specifications.
Used to map API IDs of filters and dark / offset information
(e.g. "on", "off", "dark0") onto internal list keys. It is normally
not required to use this dictionary or apply changes to it.
For EC2 camera standard this will look like::
>>> self.lists_access_info
OrderedDict([('on', ['F01', 'F01']),
('off', ['F02', 'F02']),
('offset0', ['D0L', 'D0L']),
('dark0', ['D1L', 'D1L']),
('offset1', ['D0H', 'D0H']),
('dark1', ['D1H', 'D1H'])])
Parameters
----------
input
Usable ``input`` includes :class:`MeasSetup` instance or a valid
image directory. ``input`` is passed to :func:`load_input`.
lst_type : object
default type of image list objects (e.g. :class:`ImgList`,
:class:`CellImgList`), defaults to :class:`ImgList`.
    init : bool
        if True (default), the image lists are filled on instantiation
        (given valid input)
"""
def __init__(self, input=None, lst_type=ImgList, init=1):
self.setup = None
self.lst_type = lst_type
self._lists_intern = od()
self.lists_access_info = od()
ok = self.load_input(input)
if init and ok:
self.init_image_lists()
else:
self.create_lists_default()
logger.info("DATASET INITIALISED")
@property
def camera(self):
"""Return camera base info object."""
return self.setup.camera
@camera.setter
def camera(self, val):
self.setup.camera = val
@property
def source(self):
"""Get / set current Source."""
return self.setup.source
@source.setter
def source(self, val):
self.setup.source = val
@property
def cam_id(self):
"""Return current camera ID."""
return self.setup.camera.cam_id
@property
def base_dir(self):
"""Getter / setter of current image base_dir."""
return self.setup.base_dir
@base_dir.setter
def base_dir(self, val):
if exists(val):
self.setup.base_dir = val
self.init_image_lists()
@property
def USE_ALL_FILES(self):
"""Return USE_ALL_FILES boolen from setup."""
return self.setup.USE_ALL_FILES
@USE_ALL_FILES.setter
def USE_ALL_FILES(self, val):
self.setup.USE_ALL_FILES = val
print_log.info("Option USE_ALL_FILES was updated in Dataset, please call class"
" method ``init_image_lists`` in order to apply the changes")
@property
def USE_ALL_FILE_TYPES(self):
"""Return USE_ALL_FILE_TYPES option from setup."""
return self.setup.USE_ALL_FILE_TYPES
@USE_ALL_FILE_TYPES.setter
def USE_ALL_FILE_TYPES(self, val):
self.setup.USE_ALL_FILE_TYPES = val
print_log.info("Option USE_ALL_FILE_TYPES was updated in Dataset, please call "
"class method ``init_image_lists`` in order "
"to apply the changes")
@property
def INCLUDE_SUB_DIRS(self):
"""Return boolean sub directory inclusion option."""
return self.setup.INCLUDE_SUB_DIRS
@INCLUDE_SUB_DIRS.setter
def INCLUDE_SUB_DIRS(self, val):
self.setup.INCLUDE_SUB_DIRS = val
print_log.info("Option INCLUDE_SUB_DIRS was updated in Dataset, please call "
"class method ``init_image_lists`` to apply the changes")
@property
def LINK_OFF_TO_ON(self):
"""I/O option defined in :class:`BaseSetup`."""
return self.setup.LINK_OFF_TO_ON
@LINK_OFF_TO_ON.setter
def LINK_OFF_TO_ON(self, val):
self.setup.LINK_OFF_TO_ON = val
print_log.info("Option INCLUDE_SUB_DIRS was updated in Dataset, please call "
"class method ``init_image_lists`` in order "
"to apply the changes")
@property
def start(self):
"""Getter / setter for current start time stamp."""
return self.setup.start
@start.setter
def start(self, val):
self.setup.start = val
print_log.info("Start time stamp was updated in Dataset, please call class "
"method ``init_image_lists`` in order to apply the changes")
@property
def stop(self):
"""Getter / setter for current stop time stamp."""
return self.setup.stop
@stop.setter
def stop(self, val):
self.setup.stop = val
print_log.info("Stop time stamp was updated in Dataset, please call class "
"method ``init_image_lists`` in order to apply the changes")
@property
def file_type(self):
"""Return current image file type."""
return self.setup.camera.file_type
@property
def meas_geometry(self):
"""Return current measurement geometry."""
return self.setup.meas_geometry
@property
def filters(self):
"""Return the current filter setup."""
return self.setup.camera.filter_setup
@property
def filter_acronyms(self):
"""Make a dictionary of filter IDs and corresponding acronyms."""
acros = {}
for key, val in six.iteritems(self.filters.filters):
# read the acronym from the filter object
acros[key] = val.acronym
return acros
@property
def num_of_filters(self):
"""Return the number of filters in ``self.filters``."""
return len(self.filters.filters.keys())
@property
def _fname_access_flags(self):
return self.camera._fname_access_flags
@property
def rects(self):
"""Return rectangle collection."""
return self.setup.forms.rects
    @rects.setter
    def rects(self, value):
        """Set the rectangle collection stored in ``self.setup``."""
        self.setup.forms.rects = value
@property
def lines(self):
"""Return rectangle collection."""
return self.setup.forms.lines
    @lines.setter
    def lines(self, value):
        """Set the line collection stored in ``self.setup``."""
        self.setup.forms.lines = value
def load_input(self, input):
"""Extract information from input and set / update self.setup.
Parameters
----------
input
Usable ``input`` includes :class:`MeasSetup` instance or a
valid image directory.
Returns
-------
bool
``True``, if input could be utilised, ``False`` if not
"""
if self.set_setup(input):
return 1
logger.info("Creating empty MeasSetup within Dataset")
self.setup = MeasSetup()
if input is None:
return 0
if isinstance(input, str) and isdir(input):
logger.info("Updating base_dir variable in self.setup with input "
"directory: %s" % input)
self.change_img_base_dir(input)
elif isinstance(input, Source):
self.setup.source = input
elif isinstance(input, Camera):
self.setup.camera = input
elif isinstance(input, datetime):
logger.info("Input is datetime and will be set as start time for data "
"import")
self.setup.start = input
else:
raise TypeError("Invalid input: %s.\n Require MeasSetup or "
"valid directory containing images"
% type(input))
return 0
def set_setup(self, stp):
"""Set the current measurement setup.
Parameters
----------
stp : MeasSetup
Class containing information about measurement setup
"""
if isinstance(stp, MeasSetup):
logger.info("Updating measurement setup in Dataset")
self.setup = stp
return 1
return 0
def init_image_lists(self):
"""Create and fill image lists."""
#: create img list objects for each filter and for dark / offset lists
self.create_lists_cam()
return self.fill_image_lists()
def create_lists_default(self):
"""Initialize of default lists (if camera specs not available)."""
self._lists_intern = od()
for key, f in six.iteritems(self.filters.filters):
l = self.lst_type(list_id=key, list_type=f.type,
camera=self.camera,
geometry=self.meas_geometry)
l.filter = f
if f.meas_type_acro not in self._lists_intern:
self._lists_intern[f.meas_type_acro] = od()
self._lists_intern[f.meas_type_acro][f.acronym] = l
self.lists_access_info[f.id] = [f.meas_type_acro, f.acronym]
def create_lists_cam(self):
"""Initialize of all image lists, old lists are deleted."""
self._lists_intern = od()
for key, f in six.iteritems(self.filters.filters):
l = self.lst_type(list_id=key, list_type=f.type,
camera=self.camera,
geometry=self.meas_geometry)
l.filter = f
if f.meas_type_acro not in self._lists_intern:
self._lists_intern[f.meas_type_acro] = od()
self._lists_intern[f.meas_type_acro][f.acronym] = l
self.lists_access_info[f.id] = [f.meas_type_acro, f.acronym]
if not bool(self.camera.dark_info):
msg = ("Warning: dark image lists could not be initiated, no "
"dark image file information available in self.camera")
print_log.warning(msg)
return 0
for item in self.camera.dark_info:
l = DarkImgList(list_id=item.id, list_type=item.type,
read_gain=item.read_gain, camera=self.camera)
l.filter = item
if item.meas_type_acro not in self._lists_intern:
self._lists_intern[item.meas_type_acro] = od()
self._lists_intern[item.meas_type_acro][item.acronym] = l
self.lists_access_info[item.id] = [item.meas_type_acro,
item.acronym]
def fill_image_lists(self):
"""Import all images and fill image list objects."""
warnings = []
cam = self.camera
#: check if image filetype is specified and if not, set option to use
#: all file types
self._check_file_type()
if self.base_dir is None or not exists(self.base_dir):
s = ("Warning: image base directory does not exist, method "
"init_image_lists aborted in Dataset")
warnings.append(s)
print_log.warning(s)
return False
#: load all file paths
paths = self.get_all_filepaths()
# paths now includes all valid paths dependent on whether file_type is
# specified or not and whether also subdirectories were considered
if not bool(paths):
s = ("Warning: lists could not be initiated, no valid files found "
"method init_image_lists aborted in Dataset")
warnings.append(s)
print_log.warning(s)
return False
# check which image meta information can be accessed from first file in
# list (updates ``_fname_access_flags`` in :class:`Camera`)
self.check_filename_info_access(paths[0])
# get the current meta access flags
flags = cam._fname_access_flags
if self.USE_ALL_FILES and flags["start_acq"]:
            # take all files in the basefolder (i.e. set start and stop date
            # to the first and last date of the files in the folder)
self.setup.start = cam.get_img_meta_from_filename(paths[0])[0]
self.setup.stop = cam.get_img_meta_from_filename(paths[-1])[0]
#: Set option to use all files in case acquisition time stamps cannot
#: be accessed from filename
if not flags["start_acq"]:
print_log.warning("Acquisition time access from filename not possible, "
"using all files")
self.setup.options["USE_ALL_FILES"] = True
#: Separate the current list based on specified time stamps
if not self.setup.options["USE_ALL_FILES"]:
paths_temp = self.extract_files_time_ival(paths)
if not bool(paths_temp):
# check if any files were found in specified t-window
s = ("No images found in specified time interval "
"%s - %s, mode was changed to: USE_ALL_FILES=True"
% (self.start, self.stop))
warnings.append(s)
self.setup.options["USE_ALL_FILES"] = True
else:
paths = paths_temp
if self.setup.ON_OFF_SAME_FILE:
logger.warning("Option ON_OFF_SAME_FILE is active: using same file paths "
"in default on and offband list. Please note that no "
"further file separation is applied (e.g. separation of "
"dark images)")
            # the function add_files adds the file paths to the list and loads
            # the current and next images (at index 0 and 1)
self.img_lists[self.filters.default_key_on].add_files(paths)
self.img_lists[self.filters.default_key_off].add_files(paths)
else:
if not (flags["filter_id"] and flags["meas_type"]):
#: it is not possible to separate different image types (on,
#: off, dark..) from filename, thus all are loaded into on
#: image list
warnings.append("Images can not be separated by type/meas_type"
" (e.g. on, off, dark, offset...) from "
"filename info, loading "
"all files into on-band list")
self.setup.options["SEPARATE_FILTERS"] = False
i = self.lists_access_info[self.filters.default_key_on]
self._lists_intern[i[0]][i[1]].add_files(paths)
[logger.warning(x) for x in warnings]
return True
#: now perform separation by meastype and filter
for p in paths:
try:
_, filter_id, meas_type, _, _ = self.camera. \
get_img_meta_from_filename(p)
self._lists_intern[meas_type][filter_id].files.append(p)
            except BaseException:
logger.warning("File %s could not be added..." % p)
for meas_type, sub_dict in six.iteritems(self._lists_intern):
for filter_id, lst in six.iteritems(sub_dict):
lst.init_filelist()
for lst in self.img_lists_with_data.values():
lst.load()
self.assign_dark_offset_lists()
if self.LINK_OFF_TO_ON:
try:
off_list = self.get_list(self.filters.default_key_off)
self.get_list(self.filters.default_key_on). \
link_imglist(off_list)
except BaseException:
pass
if self.setup.REG_SHIFT_OFF:
for lst in self.img_lists_with_data.values():
if lst.list_type == "off":
lst.shift_mode = True
[logger.warning(x) for x in warnings]
return True
def get_all_filepaths(self):
"""Find all valid image filepaths in current base directory.
Returns
-------
list
            list containing all valid image file paths (note that these
            include all files found in the folder(s) in case the file
            type is not explicitly set in the camera class)
"""
logger.info("\nSEARCHING VALID FILE PATHS IN\n%s\n" % self.base_dir)
p = self.base_dir
ftype = self.file_type
if not isinstance(ftype, str):
logger.warning("file_type not specified in Dataset..."
"Using all files and file_types")
self.setup.options["USE_ALL_FILES"] = True
self.setup.options["USE_ALL_FILE_TYPES"] = True
if p is None or not exists(p):
message = ('Error: path %s does not exist' % p)
logger.warning(message)
return []
if not self.INCLUDE_SUB_DIRS:
logger.info("Image search is only performed in specified directory "
"and does not include subdirectories")
if self.USE_ALL_FILE_TYPES:
logger.info("Using all file types")
all_paths = [join(p, f) for f in listdir(p) if
isfile(join(p, f))]
else:
logger.info("Using only %s files" % self.file_type)
all_paths = [join(p, f) for f in listdir(p) if
isfile(join(p, f)) and f.endswith(ftype)]
else:
logger.info("Image search includes files from subdirectories")
all_paths = []
if self.USE_ALL_FILE_TYPES:
logger.info("Using all file types")
for path, subdirs, files in walk(p):
for filename in files:
all_paths.append(join(path, filename))
else:
logger.info("Using only %s files" % ftype)
for path, subdirs, files in walk(p):
for filename in files:
if filename.endswith(ftype):
all_paths.append(join(path, filename))
all_paths.sort()
logger.info("Total number of files found %s" % len(all_paths))
return all_paths
def check_filename_info_access(self, filepath):
"""Check which information can be accessed from file name.
The access test is performed based on the filename access
information specified in the :class:`Camera` object of the
measurement setup
Parameters
----------
filepath : str
valid file path of an example image
Returns
-------
dict
            Dictionary containing information about which meta information
could be identified from the image file path based on the
current camera
"""
err = self.camera.get_img_meta_from_filename(filepath)[4]
for item in err:
logger.warning(item)
return self.camera._fname_access_flags
def change_img_base_dir(self, img_dir):
"""Set or update the current base_dir.
        :param str img_dir: new path
"""
if not exists(img_dir):
msg = ("Could not update base_dir, input path %s does not "
"exist" % img_dir)
            logger.warning(msg)
            return 0
self.setup.base_dir = img_dir
def _check_file_type(self):
"""Check if filtype information is available.
Sets::
self.USE_ALL_FILE_TYPES = True
if file type information can not be accessed
"""
info = self.camera
val = True
if isinstance(info.file_type, str):
val = False
self.setup.USE_ALL_FILE_TYPES = val
def extract_files_time_ival(self, all_paths):
"""Extract all files belonging to specified time interval.
:param list all_paths: list of image filepaths
"""
if not self.camera._fname_access_flags["start_acq"]:
logger.warning("Acq. time information cannot be accessed from file names")
return all_paths
acq_time0 = self.camera.get_img_meta_from_filename(all_paths[0])[0]
if acq_time0.date() == date(1900, 1, 1):
paths = self._find_files_ival_time_only(all_paths)
else:
paths = self._find_files_ival_datetime(all_paths)
if not bool(paths):
print_log.warning("Error: no files could be found in specified time "
"interval %s - %s" % (self.start, self.stop))
self.USE_ALL_FILES = True
else:
logger.info("%s files of type were found in specified time interval %s "
"- %s" % (len(paths), self.start, self.stop))
return paths
def _find_files_ival_time_only(self, all_paths):
"""Extract all files belonging to specified time interval.
:param list all_paths: list of image filepaths
"""
paths = []
i, f = self.start.time(), self.stop.time()
func = self.camera.get_img_meta_from_filename
for path in all_paths:
acq_time = func(path)[0].time()
if i <= acq_time <= f:
paths.append(path)
return paths
def _find_files_ival_datetime(self, all_paths):
"""Extract all files belonging to specified time interval.
This function considers the datetime stamps of ``self.start`` and
``self.stop``, see also :func:`_find_files_ival_time_only` which only
uses the actual time to find valid files.
:param list all_paths: list of image filepaths
"""
paths = []
func = self.camera.get_img_meta_from_filename
i, f = self.start, self.stop
for path in all_paths:
acq_time = func(path)[0]
if i <= acq_time <= f:
paths.append(path)
if not bool(paths):
logger.warning("Error: no files could be found in specified time "
"interval %s - %s" % (self.start, self.stop))
else:
logger.info("%s files of type were found in specified time interval %s "
"- %s" % (len(paths), self.start, self.stop))
return paths
def find_closest_img(self, filename, in_list, acronym, meas_type_acro):
"""Find closest-in-time image to input image file.
:param str filename: image filename
:param str in_list: input list with filepaths
:param str acronym: the acronym of the image type to be searched (e.g.
an acronym for a dark image as specified in camera)
:param str meas_type_acro: meas type acronym of image type to be
searched (e.g. an acronym for a dark image as specified in
camera)
"""
get_meta = self.camera.get_img_meta_from_filename
t0 = get_meta(filename)[0]
del_t = inf
idx = -1
for k in range(len(in_list)):
t1, f1, tp1, _, _ = get_meta(in_list[k])
if f1 == acronym and abs(t1 - t0).total_seconds() < del_t and \
meas_type_acro == tp1:
del_t = abs(t1 - t0).total_seconds()
idx = k
if idx == -1 or del_t == inf:
raise Exception("Error in func find_closest_img: no match")
return in_list[idx]
def all_lists(self):
"""Return list containing all available image lists.
Loops over ``self._lists_intern`` and the corresponding sub directories
"""
lists = []
for meas_type, sub_dict in six.iteritems(self._lists_intern):
for filter_id, lst in six.iteritems(sub_dict):
lists.append(lst)
return lists
@property
def dark_ids(self):
"""Get all dark IDs."""
ids = []
for info in self.camera.dark_info:
ids.append(info.id)
return ids
def assign_dark_offset_lists(self, into_list=None):
"""Assign dark and offset lists to image lists ``self.lists``.
Assign dark and offset lists in filter lists for automatic dark and
offset correction. The lists are set dependent on the read_gain
mode of the detector
:param ImgList into_list (None): optional input, if specified, the dark
assignment is performed only in the input list
"""
if isinstance(into_list, ImgList):
into_list.link_dark_offset_lists(
*list(self.dark_lists_with_data.values()))
return True
# ==============================================================================
# no_dark_ids = self.check_dark_lists()
# if len(no_dark_ids) > 0:
# self.find_master_darks(no_dark_ids)
# ==============================================================================
# loop over all image lists ...
for filter_id, lst in six.iteritems(self.img_lists):
# ... that contain data
if lst.nof > 0:
lists = od()
if self.camera.meas_type_pos != self.camera.filter_id_pos:
for dark_mtype in self.camera.dark_meas_type_acros:
for dark_acro in self.camera.dark_acros:
try:
if (lst.filter.acronym in dark_acro and
lst.filter.meas_type_acro == dark_mtype):
dark_lst = self. \
_lists_intern[dark_mtype][dark_acro]
if isinstance(dark_lst, DarkImgList):
lists[dark_lst.list_id] = dark_lst
logger.info("Found dark list match for "
"image list %s, dark ID: %s"
% (lst.list_id,
dark_lst.list_id))
except BaseException:
pass
for offs_mtype in self.camera.offset_meas_type_acros:
for offset_acro in self.camera.offset_acros:
try:
if lst.filter.acronym in offset_acro:
offs_lst = self. \
_lists_intern[offs_mtype][offset_acro]
if isinstance(offs_lst, DarkImgList):
lists[offs_lst.list_id] = offs_lst
logger.info("Found offset list match for "
"image list %s: dark ID: %s"
% (lst.list_id,
offs_lst.list_id))
except BaseException:
pass
if not lists:
lists = self.dark_lists
rm = []
for key, dark_list in six.iteritems(lists):
if dark_list.nof < 1:
try:
self.find_master_dark(dark_list)
except BaseException:
rm.append(key)
for key in rm:
del lists[key]
# now lists only contains dark and offset lists with data
for l in lists.values():
if not isinstance(l.loaded_images["this"], Img):
l.load()
if not bool(lists):
logger.warning("Failed to assign dark / offset lists to image "
"list %s, no dark images could be found" % filter_id)
else:
logger.info("Assigning dark/offset lists %s to image list %s\n"
% (list(lists.keys()), filter_id))
lst.link_dark_offset_lists(*list(lists.values()))
return True
def get_all_dark_offset_lists(self):
"""Get all dark and offset image lists."""
lists = od()
for dark_id in self.dark_ids:
info = self.lists_access_info[dark_id]
lists[dark_id] = self._lists_intern[info[0]][info[1]]
return lists
@property
def dark_lists(self):
"""Call and return :func:`get_all_dark_offset_lists`."""
return self.get_all_dark_offset_lists()
@property
def dark_lists_with_data(self):
"""Return all dark/offset lists that include image data."""
lists = od()
for dark_id, lst in six.iteritems(self.dark_lists):
if lst.nof > 0:
lists[dark_id] = lst
return lists
@property
def filter_ids(self):
"""Get all dark IDs."""
return self.filters.filters.keys()
def get_all_image_lists(self):
"""Get all image lists (without dark and offset lists)."""
lists = od()
for filter_id in self.filter_ids:
info = self.lists_access_info[filter_id]
lists[filter_id] = self._lists_intern[info[0]][info[1]]
return lists
@property
def img_lists(self):
"""Wrap :func:`get_all_image_lists`."""
return self.get_all_image_lists()
@property
def img_lists_with_data(self):
"""Wrap :func:`get_all_image_lists`."""
lists = od()
for key, lst in six.iteritems(self.img_lists):
if lst.nof > 0:
lists[key] = lst
return lists
def check_dark_lists(self):
"""Check all dark lists whether they contain images or not."""
no_data_ids = []
for dark_id, lst in six.iteritems(self.dark_lists):
if not lst.nof > 0:
no_data_ids.append(lst.list_id)
return no_data_ids
def find_master_dark(self, dark_list):
"""Search master dark image for a specific dark list.
Search a master dark image for the provided dark image list (used when
the list does not contain any images).
"""
dark_id = dark_list.list_id
logger.info("\nSearching master dark image for dark list %s" % dark_id)
flags = self.camera._fname_access_flags
if not (flags["filter_id"] and flags["meas_type"]):
#: it is not possible to separate different image types (on, off,
#: dark..) from filename, thus dark or offset images can not be
#: searched
raise ImgMetaError("Image identification via file name is not "
"possible in Dataset")
all_files = self.get_all_filepaths()
l = self.get_list(self.filters.default_key_on)
if l.data_available:
f_name = l.files[int(l.nof / 2)]
else:
f_name = all_files[int(len(all_files) / 2.)]
meas_type_acro, acronym = self.lists_access_info[dark_id]
try:
p = self.find_closest_img(f_name, all_files, acronym,
meas_type_acro)
dark_list.files.append(p)
dark_list.init_filelist()
logger.info("Found dark image for ID %s\n" % dark_id)
except BaseException:
logger.info("Failed to find dark image for ID %s\n" % dark_id)
raise Exception
return dark_list
def find_master_darks(self, dark_ids=None):
"""Search master dark image for dark image lists.
Search a master dark image for all dark image lists that do not
contain images.
"""
if dark_ids is None:
dark_ids = []
logger.info("\nCHECKING DARK IMAGE LISTS IN DATASET")
flags = self.camera._fname_access_flags
if not (flags["filter_id"] and flags["meas_type"]):
#: it is not possible to separate different image types (on, off,
#: dark..) from filename, thus dark or offset images can not be
#: searched
return []
all_files = self.get_all_filepaths()
l = self.get_list(self.filters.default_key_on)
if l.data_available:
f_name = l.files[int(l.nof / 2)]
else:
f_name = all_files[int(len(all_files) / 2.)]
failed_ids = []
if not bool(dark_ids):
dark_ids = self.dark_lists.keys()
for dark_id in dark_ids:
lst = self.dark_lists[dark_id]
if not lst.nof > 0:
meas_type_acro, acronym = self.lists_access_info[dark_id]
logger.info("\nSearching master dark image for\nID: %s\nacronym: %s"
"\nmeas_type_acro: %s" % (dark_id, acronym,
meas_type_acro))
try:
p = self.find_closest_img(f_name, all_files, acronym,
meas_type_acro)
lst.files.append(p)
lst.init_filelist()
logger.info("Found dark image for ID %s\n" % dark_id)
except BaseException:
logger.info("Failed to find dark image for ID %s\n" % dark_id)
failed_ids.append(dark_id)
return failed_ids
def check_image_access_dark_lists(self):
"""Check whether dark and offset image lists contain at least one img.
"""
for lst in self.dark_lists.values():
if not lst.data_available:
return False
return True
"""Helpers"""
def images_available(self, filter_id):
"""Check if image list has images.
:param str filter_id: string (filter) ID of image list
"""
try:
if self.get_list(filter_id).nof > 0:
return 1
return 0
except BaseException:
return 0
def current_image(self, filter_id):
"""Get current image of image list.
:param str filter_id: filter ID of image list
"""
try:
return self.get_list(filter_id).current_img()
except BaseException:
return 0
def get_list(self, list_id):
"""Get image list for one filter.
:param str list_id: ID of image list (e.g. "on")
"""
if list_id not in self.lists_access_info.keys():
raise KeyError("%s ImgList could not be found..." % list_id)
info = self.lists_access_info[list_id]
lst = self._lists_intern[info[0]][info[1]]
if not lst.nof > 0:
logger.warning("Image list %s does not contain any images" % list_id)
return lst
def get_current_img_prep_dict(self, list_id=None):
"""Get the current image preparation settings from one image list.
:param str list_id: ID of image list
"""
if list_id is None:
list_id = self.filters.default_key_on
return self.get_list(list_id).img_prep
def load_images(self):
"""Load the current images in all image lists.
Note
----
Gives warning for lists containing no images
"""
for lst in self.all_lists():
if lst.nof > 0:
lst.load()
else:
logger.warning("No images available in list %s" % lst.list_id)
def update_image_prep_settings(self, **settings):
"""Update image preparation settings in all image lists."""
for list_id, lst in six.iteritems(self.img_lists):
logger.info("Checking changes in list %s: " % list_id)
val = lst.update_img_prep_settings(**settings)
logger.info("list %s updated (0 / 1): %s" % (list_id, val))
def update_times(self, start, stop):
"""Update start and stop times of this dataset and reload.
:param datetime start: new start time
:param datetime stop: new stop time
"""
if not all([isinstance(x, datetime) for x in [start, stop]]):
raise TypeError("Times could not be changed in Dataset, "
"wrong input type: %s, %s (need datetime)"
% (type(start), type(stop)))
self.setup.start = start
self.setup.stop = stop
self.setup.check_timestamps()
def duplicate(self):
"""Duplicate Dataset object."""
logger.info('Dataset successfully duplicated')
return deepcopy(self)
"""GUI stuff
"""
# ==============================================================================
# def open_in_gui(self):
# """Open this dataset in GUI application"""
# try:
# import pyplis.gui as gui
# app=QApplication(argv)
#
# #win = DispTwoImages.DispTwoImagesWidget(fileListRight=fileList)
# win = gui.MainApp.MainApp(self)
# win.show()
# app.exec_() #run main loop
# except:
# print ("Error: could not open pyplis GUI")
# raise
# ==============================================================================
"""
Plotting etc.
"""
def show_current_img(self, filter_id, add_forms=False):
"""Plot current image.
:param str filter_id: filter ID of image list (e.g. "on")
"""
ax = self.current_image(filter_id).show_img()
if add_forms:
handles = []
for k, v in six.iteritems(self.lines._forms):
l = Line2D([v[0], v[2]], [v[1], v[3]], color="#00ff00",
label=k)
handles.append(l)
ax.add_artist(l)
for k, v in six.iteritems(self.rects._forms):
w, h = v[2] - v[0], v[3] - v[1]
r = Rectangle((v[0], v[1]), w, h, ec="#00ff00", fc="none",
label=k)
ax.add_patch(r)
handles.append(r)
ax.legend(handles=handles, loc='best', fancybox=True,
framealpha=0.5, fontsize=10).draggable()
return ax
# ax.draw()
def plot_mean_value(self, filter_id, yerr=1, rect=None):
"""Plot the pixel mean value of specified filter.
Only pixel values in the time span covered by this dataset are used.
"""
self.get_list(filter_id).plot_mean_value(yerr=yerr, rect=rect)
def draw_map_2d(self, *args, **kwargs):
"""Call and return :func:`draw_map_2d` of ``self.meas_geometry``."""
return self.meas_geometry.draw_map_2d(*args, **kwargs)
def draw_map_3d(self, *args, **kwargs):
"""Call and return :func:`draw_map_3d` of ``self.meas_geometry``."""
return self.meas_geometry.draw_map_3d(*args, **kwargs)
def print_list_info(self):
"""Print overview information about image lists."""
s = ("info about image lists in dataset\n-------------------------\n\n"
"Scene image lists:\n------------------------\n")
for lst in self.img_lists.values():
s += ("ID: %s, type: %s, %s images\n"
% (lst.list_id, lst.list_type, lst.nof))
s += "\nDark image lists:\n------------------------\n"
for lst in self.dark_lists.values():
s += ("ID: %s, type: %s, read_gain: %s, %s images\n"
% (lst.list_id, lst.list_type, lst.read_gain, lst.nof))
logger.info(s)
"""
THE FOLLOWING STUFF WAS COPIED FROM OLD PLUMEDATA OBJECT
"""
def connect_meas_geometry(self):
"""Set pointer to current measurement geometry within image lists."""
if self.meas_geometry is not None:
for filter_id in self.filters.filters.keys():
self.get_list(filter_id).set_meas_geometry(self.meas_geometry)
def plot_tau_preview(self, on_id="on", off_id="off", pyrlevel=2):
"""Plot a preview of current tau_on, tau_off and AA images.
AA is plotted twice (in two different value ranges) in the bottom row of subplots.
:param str on_id: string ID of onband filter ("on")
:param str off_id: string ID of offband filter ("off")
:param pyrlevel: provide any integer here to reduce the image
sizes using a Gaussian pyramid approach (2)
"""
lists = {}
tm = {on_id: 1,
off_id: 1}
def fmt(num):
return '{:.1e}'.format(num)
for list_id in [on_id, off_id]:
try:
l = self.get_list(list_id)
lists[list_id] = l
if not l.bg_model.ready_2_go():
logger.info("Tau preview could not be plotted, bg model is not "
" ready for filter: %s" % list_id)
return 0
if not l.tau_mode:
tm[list_id] = 0
l.activate_tau_mode()
except BaseException:
logger.info(format_exc())
return 0
fig, axes = subplots(2, 2, figsize=(16, 10))
tau_on = lists[on_id].current_img().pyr_down(pyrlevel)
t_on_str = lists[on_id].current_time_str()
tau_off = lists[off_id].current_img().pyr_down(pyrlevel)
t_off_str = lists[off_id].current_time_str()
aa = tau_on - tau_off # AA image object
tau_max = max([tau_on.img.max(), tau_off.img.max(), aa.img.max()])
tau_min = min([tau_on.img.min(), tau_off.img.min(), aa.img.min()])
# make a color map for the index range
cmap = shifted_color_map(tau_min, tau_max)
im = axes[0, 0].imshow(tau_on.img, cmap=cmap,
vmin=tau_min, vmax=tau_max)
fig.colorbar(im, ax=axes[0, 0], format=FuncFormatter(fmt))
axes[0, 0].set_title("tau on: %s" % t_on_str)
im = axes[0, 1].imshow(tau_off.img, cmap=cmap,
vmin=tau_min, vmax=tau_max)
fig.colorbar(im, ax=axes[0, 1], format=FuncFormatter(fmt))
axes[0, 1].set_title("tau off: %s" % t_off_str)
im = axes[1, 0].imshow(aa.img, cmap=cmap,
vmin=tau_min, vmax=tau_max)
fig.colorbar(im, ax=axes[1, 0], format=FuncFormatter(fmt))
axes[1, 0].set_title("AA (vals scaled)")
cmap = shifted_color_map(aa.img.min(), aa.img.max())
im = axes[1, 1].imshow(aa.img, cmap=cmap)
fig.colorbar(im, ax=axes[1, 1], format=FuncFormatter(fmt))
axes[1, 1].set_title("AA")
tight_layout()
for k, v in six.iteritems(tm):
lists[k].activate_tau_mode(v)
return axes
def __getitem__(self, key):
"""Get one class item.
Searches in ``self.__dict__`` and ``self.setup`` and returns item if
match found
:param str key: name of item
"""
if key in self.setup.__dict__:
return self.setup.__dict__[key]
elif key in self.__dict__:
return self.__dict__[key]
def __setitem__(self, key, val):
"""Update an item values.
Searches in ``self.__dict__`` and ``self.setup`` and overwrites if
match found
:param str key: key of item (e.g. base_dir)
:param val: the replacement
"""
if key in self.setup.__dict__:
self.setup.__dict__[key] = val
elif key in self.__dict__:
self.__dict__[key] = val
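# Hedged usage sketch (not part of the original module): the class and attribute
# names below are inferred from the surrounding code and may not match the actual
# pyplis API exactly.
#
#   ds = Dataset(setup)            # 'setup' assumed to be a MeasSetup-like object
#   ds.load_images()               # load the current image in every list
#   on_list = ds.get_list("on")    # access the on-band image list
#   ds.assign_dark_offset_lists()  # link dark / offset lists for correction
#   print(ds["base_dir"])          # __getitem__ also exposes setup attributes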
|
gpl-3.0
|
sbrodeur/ros-icreate-bbb
|
src/action/scripts/record/data/visualize_hdf5.py
|
1
|
44116
|
#!/usr/bin/env python
# Copyright (c) 2016, Simon Brodeur
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import io
import sys
import os
import time
import logging
import numpy as np
import cv2
import scipy
import scipy.io.wavfile
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
import traceback
import multiprocessing
from multiprocessing.pool import Pool
from h5utils import Hdf5Dataset
from optparse import OptionParser
logger = logging.getLogger(__name__)
class LogExceptions(object):
"""
Adapted from: http://stackoverflow.com/questions/6728236/exception-thrown-in-multiprocessing-pool-not-detected
"""
def __init__(self, callable):
self.__callable = callable
def __call__(self, *args, **kwargs):
try:
result = self.__callable(*args, **kwargs)
except Exception as e:
# Here we add some debugging help. If multiprocessing's
# debugging is on, it will arrange to log the traceback
logger.error(traceback.format_exc())
# Re-raise the original exception so the Pool worker can
# clean up
raise
# It was fine, give a normal answer
return result
class LoggingPool(Pool):
"""
From: http://stackoverflow.com/questions/6728236/exception-thrown-in-multiprocessing-pool-not-detected
"""
def apply_async(self, func, args=(), kwds={}, callback=None):
return Pool.apply_async(self, LogExceptions(func), args, kwds, callback)
def is_cv2():
import cv2 as lib
return lib.__version__.startswith("2.")
def is_cv3():
import cv2 as lib
return lib.__version__.startswith("3.")
# Adapted from: http://stackoverflow.com/questions/22867620/putting-arrowheads-on-vectors-in-matplotlibs-3d-plot
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def set_data(self, xs, ys, zs):
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
# Adapted from: https://afni.nimh.nih.gov/pub/dist/src/pkundu/meica.libs/nibabel/quaternions.py
def quat2mat(q):
w, x, y, z = q
Nq = w*w + x*x + y*y + z*z
if Nq < np.finfo(np.float64).eps:
return np.eye(3)
s = 2.0/Nq
X = x*s
Y = y*s
Z = z*s
wX = w*X; wY = w*Y; wZ = w*Z
xX = x*X; xY = x*Y; xZ = x*Z
yY = y*Y; yZ = y*Z; zZ = z*Z
return np.array(
[[ 1.0-(yY+zZ), xY-wZ, xZ+wY ],
[ xY+wZ, 1.0-(xX+zZ), yZ-wX ],
[ xZ-wY, yZ+wX, 1.0-(xX+yY) ]])
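# Hedged sanity-check sketch (not in the original script): the identity quaternion
# (w=1, x=y=z=0) should map to the 3x3 identity matrix, and a 90-degree rotation
# about z should rotate the x unit vector onto y.
def _demo_quat2mat():
    assert np.allclose(quat2mat(np.array([1.0, 0.0, 0.0, 0.0])), np.eye(3))
    R = quat2mat(np.array([np.cos(np.pi / 4.0), 0.0, 0.0, np.sin(np.pi / 4.0)]))
    assert np.allclose(np.dot(R, np.array([1.0, 0.0, 0.0])), np.array([0.0, 1.0, 0.0]))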
def exportQuaternionFramesAsVideo(frames, fs, filename, title, grid=False, fsVideo=None):
if fsVideo is None:
fsVideo = fs
downsampleRatio = np.max([1, int(fs/fsVideo)])
# Create the video file writer
writer = animation.FFMpegWriter(fps=fs/float(downsampleRatio), codec='libx264', extra_args=['-preset', 'ultrafast'])
fig = plt.figure(figsize=(5,4), facecolor='white', frameon=False)
# Create arrows
arrows = []
labels = ['x', 'y', 'z']
colors = ['r', 'g', 'b']
vectors = np.eye(3)
for i in range(3):
x,y,z = vectors[:,i]
arrow = Arrow3D([0.0, x], [0.0, y], [0.0, z], mutation_scale=20,
lw=3, arrowstyle="-|>", color=colors[i], label=labels[i])
arrows.append(arrow)
ax = fig.gca(projection='3d')
fig.tight_layout()
fig.subplots_adjust(left=0.10, bottom=0.10)
ax.grid(grid)
ax.set_title(title)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
ax.set_zticks([])
ax.view_init(elev=45.0, azim=45.0)
ax.axis([-1.0, 1.0, -1.0, 1.0])
ax.set_zlim(-1.0, 1.0)
# Create arrows
for arrow in arrows:
ax.add_artist(arrow)
proxies = [plt.Rectangle((0, 0), 1, 1, fc=c) for c in colors]
legend = ax.legend(proxies, labels, loc='upper right')
startTime = time.time()
with writer.saving(fig, filename, 100):
for n, frame in enumerate(frames):
if n % int(downsampleRatio) == 0:
# Convert from quaternion (w, x, y, z) to rotation matrix
x,y,z,w = frame
quaternion = np.array([w,x,y,z])
R = quat2mat(quaternion)
# Apply the rotation to the axis vectors (pointing in Y-axis)
directions = np.eye(3) # x, y, z as column vectors
vectors = np.dot(R, directions)
assert np.allclose(np.linalg.norm(vectors, 2, axis=0), np.ones((3,)), atol=1e-6)
# Update existing plot
for i in range(3):
x,y,z = vectors[:,i]
arrows[i].set_data([0.0, x], [0.0, y], [0.0, z])
writer.grab_frame()
elapsedTime = time.time() - startTime
logger.info('FPS = %f frame/sec' % (len(frames)/elapsedTime))
plt.close(fig)
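# Hedged usage sketch (not in the original script): export a short synthetic
# rotation about the z-axis. Frames follow the (x, y, z, w) ordering expected by
# exportQuaternionFramesAsVideo; ffmpeg must be available on the system path.
def _demo_export_orientation(filename='demo_orientation.avi'):
    angles = np.linspace(0.0, 2.0 * np.pi, 50)
    frames = np.stack([np.zeros_like(angles),
                       np.zeros_like(angles),
                       np.sin(angles / 2.0),
                       np.cos(angles / 2.0)], axis=1)
    exportQuaternionFramesAsVideo(frames, fs=10, filename=filename,
                                  title='Synthetic rotation', fsVideo=10)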
def exportPositionFramesAsVideo(framesPos, framesOri, fs, filename, title, grid=False, fsVideo=None):
assert len(framesPos) == len(framesOri)
if fsVideo is None:
fsVideo = fs
downsampleRatio = np.max([1, int(fs/fsVideo)])
# Create the video file writer
writer = animation.FFMpegWriter(fps=fs/float(downsampleRatio), codec='libx264', extra_args=['-preset', 'ultrafast'])
fig = plt.figure(figsize=(5,4), facecolor='white', frameon=False)
ax = fig.add_subplot(111)
scatPast = ax.scatter([0,], [0,], s=10)
scatCur = ax.scatter([0,], [0,], c=[1.0,0.0,0.0], s=50)
fig.tight_layout()
fig.subplots_adjust(left=0.20, bottom=0.15)
ax.grid(grid)
ax.set_title(title)
ax.set_xlabel("x [meter]")
ax.set_ylabel("y [meter]")
ax.axis([-1.0, 1.0, -1.0, 1.0])
windowsSize = 10000
startTime = time.time()
with writer.saving(fig, filename, 100):
nbPoints = 0
data = np.zeros((windowsSize, 2), dtype=np.float32)
for n, (position, orientation) in enumerate(zip(framesPos,framesOri)):
# Update buffer
data[0:-1:,:] = data[1::,:]
data[-1,:] = position[0:2]
if nbPoints < windowsSize:
nbPoints += 1
# Convert from quaternion (w, x, y, z) to rotation matrix
x,y,z,w = orientation
quaternion = np.array([w,x,y,z])
R = quat2mat(quaternion)
# Apply the rotation to the vector pointing in the forward direction (x-axis)
direction = np.array([1.0, 0.0, 0.0])
vector = np.dot(R, direction)
vector /= np.linalg.norm(vector, 2)
if n % int(downsampleRatio) == 0:
cdata = data[windowsSize-nbPoints:,:]
scatPast.set_offsets(cdata)
scatCur.set_offsets(cdata[-1,:])
border = 0.5
xlim = np.array([np.min(cdata[:,0])-border, np.max(cdata[:,0])+border])
ylim = np.array([np.min(cdata[:,1])-border, np.max(cdata[:,1])+border])
scale = np.max([xlim[1] - xlim[0], ylim[1] - ylim[0]])
ax.set_xlim(xlim)
ax.set_ylim(ylim)
x, y = cdata[-1,0], cdata[-1,1]
dx, dy = vector[0] * 0.05 * scale, vector[1] * 0.05 * scale
lines = ax.plot([x, x+dx], [y, y+dy], c='r')
writer.grab_frame()
lines.pop(0).remove()
elapsedTime = time.time() - startTime
logger.info('FPS = %f frame/sec' % (len(framesPos)/elapsedTime))
plt.close(fig)
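# Hedged usage sketch (not in the original script): a robot driving a circle while
# facing along the tangent direction; quaternions are given in (x, y, z, w) order
# and ffmpeg must be available on the system path.
def _demo_export_position(filename='demo_position.avi'):
    angles = np.linspace(0.0, 2.0 * np.pi, 100)
    positions = np.stack([np.cos(angles), np.sin(angles), np.zeros_like(angles)], axis=1)
    headings = angles + np.pi / 2.0  # tangent direction of the circle
    orientations = np.stack([np.zeros_like(angles), np.zeros_like(angles),
                             np.sin(headings / 2.0), np.cos(headings / 2.0)], axis=1)
    exportPositionFramesAsVideo(positions, orientations, fs=10, filename=filename,
                                title='Synthetic trajectory')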
def exportBatteryFramesAsVideo(frames, fs, filename, title, labels, ylim=None, windowSize=None, grid=False, legend=None, fsVideo=None):
ndim = 2
assert frames.shape[1] == ndim
if fsVideo is None:
fsVideo = fs
downsampleRatio = np.max([1, int(fs/fsVideo)])
# Create the video file writer
writer = animation.FFMpegWriter(fps=fs/float(downsampleRatio), codec='libx264', extra_args=['-preset', 'ultrafast'])
fig = plt.figure(figsize=(5,4), facecolor='white', frameon=False)
ax = fig.add_subplot(111)
fig.tight_layout()
fig.subplots_adjust(left=0.20, bottom=0.15, right=0.80)
if windowSize is None:
windowSize = int(2*fs)
xlabel, ylabel, ylabel2 = labels
ax.grid(grid)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax2 = ax.twinx()
ax2.set_ylabel(ylabel2)
if ylim is not None:
ax.set_xlim([-windowSize/fs, 0.0])
ax.set_ylim(ylim[0])
ax2.set_xlim([-windowSize/fs, 0.0])
ax2.set_ylim(ylim[1])
xdata=-np.arange(windowSize)[::-1]/float(fs)
ydata=np.zeros(windowSize)
lines = []
lines.append(ax.plot(xdata,ydata, '-r')[0])
lines.append(ax2.plot(xdata,ydata, '-b')[0])
if legend is not None:
l1 = ax.legend([legend[0],], loc='upper left')
plt.setp(l1.get_texts(),fontsize='small')
l2 = ax2.legend([legend[1],], loc='upper right')
plt.setp(l2.get_texts(),fontsize='small')
data = np.zeros((windowSize, ndim))
startTime = time.time()
with writer.saving(fig, filename, 100):
for n, frame in enumerate(frames):
# Update buffer
data[0:-1:,:] = data[1::,:]
data[-1,:] = frame
if n % int(downsampleRatio) == 0:
# Update existing line plots
for i in range(ndim):
ydata=np.array(data[:,i])
lines[i].set_data(xdata, ydata)
writer.grab_frame()
elapsedTime = time.time() - startTime
logger.info('FPS = %f frame/sec' % (len(frames)/elapsedTime))
plt.close(fig)
def exportSensorFramesAsVideo(frames, fs, filename, title, labels, ylim=None, windowSize=None, grid=False, legend=None, fsVideo=None):
ndim = frames.shape[1]
assert ndim <= 6
if fsVideo is None:
fsVideo = fs
downsampleRatio = np.max([1, int(fs/fsVideo)])
# Create the video file writer
writer = animation.FFMpegWriter(fps=fs/float(downsampleRatio), codec='libx264', extra_args=['-preset', 'ultrafast'])
fig = plt.figure(figsize=(5,4), facecolor='white', frameon=False)
ax = fig.add_subplot(111)
fig.tight_layout()
fig.subplots_adjust(left=0.20, bottom=0.15)
if windowSize is None:
windowSize = int(2*fs)
xlabel, ylabel = labels
ax.grid(grid)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if ylim is not None and ylim != 'boolean':
ax.axis([-windowSize/fs, 0.0, ylim[0], ylim[1]])
elif ylim == 'boolean':
ax.axis([-windowSize/fs, 0.0, -0.1, 1.1])
plt.yticks([0.0, 1.0], ['Off', 'On'])
xdata=-np.arange(windowSize)[::-1]/float(fs)
ydata=np.zeros(windowSize)
lines = []
colors = ['-r', '-g', '-b', 'c', 'm', 'y']
for i in range(ndim):
lines.append(ax.plot(xdata,ydata, colors[i])[0])
if legend is not None:
l = ax.legend(legend, loc='upper left')
plt.setp(l.get_texts(),fontsize='small')
ymax = 0.0
data = np.zeros((windowSize, ndim))
startTime = time.time()
with writer.saving(fig, filename, 100):
for n, frame in enumerate(frames):
# Update buffer
data[0:-1:,:] = data[1::,:]
data[-1,:] = frame
if n % int(downsampleRatio) == 0:
# Update existing line plots
for i in range(ndim):
ydata=np.array(data[:,i])
lines[i].set_data(xdata, ydata)
if ylim is None:
cmax = np.max(np.abs(data))
if cmax > ymax:
ymax = cmax
ax.axis([-windowSize/fs, 0.0, -ymax, ymax])
writer.grab_frame()
elapsedTime = time.time() - startTime
logger.info('FPS = %f frame/sec' % (len(frames)/elapsedTime))
plt.close(fig)
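# Hedged usage sketch (not in the original script): render a 3-channel synthetic
# sine/cosine signal through the generic sensor exporter. Channel names and limits
# are illustrative only; ffmpeg must be available on the system path.
def _demo_export_sensor(filename='demo_sensor.avi'):
    fs = 20
    t = np.arange(5 * fs) / float(fs)
    frames = np.stack([np.sin(2.0 * np.pi * 0.5 * t),
                       np.cos(2.0 * np.pi * 0.5 * t),
                       0.5 * np.sin(2.0 * np.pi * 0.25 * t)], axis=1)
    exportSensorFramesAsVideo(frames, fs, filename, 'Synthetic sensor',
                              labels=['Time [sec]', 'Amplitude'],
                              ylim=[-1.0, 1.0], windowSize=int(2 * fs),
                              legend=['ch1', 'ch2', 'ch3'])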
def exportBatteryStatusFramesAsVideo(frames, fs, filename, title, labels=['Time [sec]', 'Status'], windowSize=None, fsVideo=None):
ndim = frames.shape[1]
assert ndim <= 1
if fsVideo is None:
fsVideo = fs
downsampleRatio = np.max([1, int(fs/fsVideo)])
# Create the video file writer
writer = animation.FFMpegWriter(fps=fs/float(downsampleRatio), codec='libx264', extra_args=['-preset', 'ultrafast'])
fig = plt.figure(figsize=(5,4), facecolor='white', frameon=False)
ax = fig.add_subplot(111)
fig.tight_layout()
fig.subplots_adjust(left=0.30, bottom=0.15)
if windowSize is None:
windowSize = int(2*fs)
xlabel, ylabel = labels
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.axis([-windowSize/fs, 0.0, -0.1, 4.1])
plt.yticks([0.0, 1.0, 2.0, 3.0, 4.0], ['Unknown', 'Charging', 'Discharging', 'Not charging', 'Full'])
xdata=-np.arange(windowSize)[::-1]/float(fs)
ydata=np.zeros(windowSize)
lines = []
colors = ['-r', '-g', '-b', 'c', 'm', 'y']
for i in range(ndim):
lines.append(ax.plot(xdata,ydata, colors[i])[0])
ymax = 0.0
data = np.zeros((windowSize, ndim))
startTime = time.time()
with writer.saving(fig, filename, 100):
for n, frame in enumerate(frames):
# Update buffer
data[0:-1:,:] = data[1::,:]
data[-1,:] = frame
if n % int(downsampleRatio) == 0:
# Update existing line plots
for i in range(ndim):
ydata=np.array(data[:,i])
lines[i].set_data(xdata, ydata)
writer.grab_frame()
elapsedTime = time.time() - startTime
logger.info('FPS = %f frame/sec' % (len(frames)/elapsedTime))
plt.close(fig)
def exportFlowFramesAsVideo(framesFlow, fs, filename, fsVideo=None):
if fsVideo is None:
fsVideo = fs
downsampleRatio = np.max([1, int(fs/fsVideo)])
# Create the video file writer
writer = animation.FFMpegWriter(fps=fs/float(downsampleRatio), codec='libx264', extra_args=['-preset', 'ultrafast'])
# NOTE: hardcoded image size and border used to compute optical flow
ih, iw = 240, 320
border = 0.1
h,w = framesFlow[0].shape[:2]
y, x = np.meshgrid(np.linspace(border*ih, (1.0-border)*ih, h, dtype=int),
np.linspace(border*iw, (1.0-border)*iw, w, dtype=int),
indexing='ij')
fig = plt.figure(figsize=(5,4), facecolor='white', frameon=False)
ax = fig.add_subplot(111)
q = ax.quiver(x, y, np.zeros((h,w)), np.zeros((h,w)), edgecolor='k', scale=1, scale_units='xy')
ax.invert_yaxis()
plt.axis('off')
fig.tight_layout()
startTime = time.time()
with writer.saving(fig, filename, 100):
for n, flow in enumerate(framesFlow):
if n % int(downsampleRatio) == 0:
q.set_UVC(flow[:,:,0], -flow[:,:,1])
writer.grab_frame()
elapsedTime = time.time() - startTime
logger.info('FPS = %f frame/sec' % (len(framesFlow)/elapsedTime))
plt.close(fig)
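# Hedged usage sketch (not in the original script): export a short sequence of
# constant synthetic flow fields on a 12x16 grid to exercise the quiver writer.
def _demo_export_flow(filename='demo_flow.avi'):
    frames_flow = np.ones((20, 12, 16, 2), dtype=np.float32)
    exportFlowFramesAsVideo(frames_flow, fs=10, filename=filename)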
def processIRrange(dataset, outDirPath, fsVideo=None):
group = 'collision'
name = 'range'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
# Convert raw data into the interval [0,1]
maxValue = 65535
raw = np.array(raw, dtype=np.float32) / maxValue
title = 'IR range sensors'
labels = ['Time [sec]', "Amplitude"]
ylim=[0, 0.25]
legend = ['Wall', 'Cliff left', 'Cliff front-left', 'Cliff front-right', 'Cliff right']
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processContacts(dataset, outDirPath, name=None, fsVideo=None):
group = 'collision'
names = ['contact', 'cliff', 'wheel-drop']
if name is not None:
names = [name,]
for name in names:
[_, _, raw, clock, shape] = dataset.getStates('switch', group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
# Convert raw data into the interval [0,1]
raw = np.array(raw, dtype=np.float32)
if name == 'contact':
# Only keep a subset of the sensors related to bumpers and wall
raw = raw[:, [0,1,9,10]]
title = 'Contact sensors'
legend = ['bumper left', 'bumper right', 'wall', 'virtual wall']
elif name == 'cliff':
# Only keep a subset of the sensors related to cliff sensors
raw = raw[:, [5,6,7,8]]
title = 'Cliff sensors'
legend = ['left', 'front-left', 'front-right', 'right']
elif name == 'wheel-drop':
# Only keep a subset of the sensors related to wheel and caster drop
raw = raw[:, [2,3,4]]
title = 'Wheel drop sensors'
legend = ['caster', 'wheel left', 'wheel right']
labels = ['Time [sec]', "Activation"]
ylim = 'boolean'
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processBattery(dataset, outDirPath, fsVideo=None):
group = 'battery'
name = 'charge'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
name = 'charge-voltage'
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
# Select only the subset related to battery voltage and current (respectively)
raw = np.array(raw[:,[0,1]], dtype=np.float32)
# Convert current from A to mA
raw[:,1] = raw[:,1]*1000.0
title = 'Battery'
labels = ['Time [sec]', "Voltage [V]", 'Current [mA]']
ylim=[[10.0, 18.0], [-2000.0, 2000.0]]
legend = ['Voltage', 'Current']
exportBatteryFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processBatteryCharge(dataset, outDirPath, fsVideo=None):
group = 'battery'
name = 'charge'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
# Select only the subset related to battery charge
raw = np.array(raw[:,[2,]], dtype=np.float32)
# Convert charge from Ah to mAh
raw = raw * 1000.0
title = 'Battery charge'
labels = ['Time [sec]', "Charge [mAh]"]
ylim=[0.0, 4000.0]
legend = None
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processBatteryPercentage(dataset, outDirPath, fsVideo=None):
group = 'battery'
name = 'charge'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
name = 'charge-percentage'
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
# Select only the subset related to battery percentage
raw = np.array(raw[:,[5,]], dtype=np.float32) * 100.0
title = 'Battery percentage'
labels = ['Time [sec]', "Charge [%]"]
ylim=[0.0, 105.0]
legend = None
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processBatteryStatus(dataset, outDirPath, fsVideo=None):
group = 'battery'
name = 'status'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
# Select only the subset related to battery status
raw = np.array(raw[:,[0,]], dtype=int)
title = 'Battery status'
labels = ['Time [sec]', "Status"]
exportBatteryStatusFramesAsVideo(raw, fs, outputVideoFile, title, labels, windowSize=int(2*fs), fsVideo=fsVideo)
def processPosition(dataset, outDirPath, fsVideo=None):
group = 'odometry'
name = 'position'
[_, _, rawPos, clock, _] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
[_, _, rawOri, clock, _] = dataset.getStates('orientation', group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, 'orientation', group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
title = 'Odometry position and orientation (x-y)'
exportPositionFramesAsVideo(rawPos, rawOri, fs, outputVideoFile, title, grid=True, fsVideo=fsVideo)
def processOrientation(dataset, outDirPath, fsVideo=None, rawValues=False):
group = 'imu'
if rawValues:
name = 'orientation_raw'
else:
name = 'orientation'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
title = 'Orientation'
exportQuaternionFramesAsVideo(raw, fs, outputVideoFile, title, grid=False, fsVideo=fsVideo)
def processMotors(dataset, outDirPath, fsVideo=None):
group = 'motors'
name = 'speed'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
title = 'Motor velocity'
labels = ['Time [sec]', "Motor velocity [mm/s]"]
ylim=[-250, 250]
legend = ['left', 'right']
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processImu(dataset, outDirPath, name=None, fsVideo=None):
group = 'imu'
names = {'imu-gyro':'angular_velocity', 'imu-accel':'linear_acceleration', 'imu-mag':'magnetic_field'}
# Convert dictionary to list
if name is not None:
names = [names[name],]
else:
names = list(names.values())
for name in names:
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
if name == 'angular_velocity':
title = 'Angular velocity'
labels = ['Time [sec]', "Angular velocity [rad/s]"]
ylim=[-2.0, 2.0]
elif name == 'linear_acceleration':
title = 'Linear acceleration'
labels = ['Time [sec]', "Linear acceleration [m/s^2]"]
ylim=[-12.0, 12.0]
elif name == 'magnetic_field':
title = 'Magnetic field'
labels = ['Time [sec]', "Magnetic field [uT]"]
raw *= 1e6 # Convert from T to uT
ylim=[-50.0, 50.0]
legend = ['x-axis', 'y-axis', 'z-axis']
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processImuRaw(dataset, outDirPath, name=None, fsVideo=None):
group = 'imu'
names = {'imu-gyro-raw':'angular_velocity_raw', 'imu-accel-raw':'linear_acceleration_raw'}
# Convert dictionary to list
if name is not None:
names = [names[name],]
else:
names = list(names.values())
for name in names:
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
if name == 'angular_velocity_raw':
title = 'Angular velocity (raw)'
labels = ['Time [sec]', "Angular velocity [rad/s]"]
ylim=[-2.0, 2.0]
elif name == 'linear_acceleration_raw':
title = 'Linear acceleration (raw)'
labels = ['Time [sec]', "Linear acceleration [m/s^2]"]
ylim=[-12.0, 12.0]
legend = ['x-axis', 'y-axis', 'z-axis']
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processAudioSignal(dataset, outDirPath, name=None, fs=16000, tolerance=0.25, fsVideo=None):
group = 'audio'
names = {'audio-signal-left':'left', 'audio-signal-right':'right'}
# Convert dictionary to list
if name is not None:
names = [names[name],]
else:
names = list(names.values())
for name in names:
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fps = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated packet sampling rate of %d Hz for %s (group: %s)' % (fps, name, group))
# Validate audio length
audioLength = raw.shape[0] * raw.shape[1] / float(fs)
referenceLength = clock[-1] + raw.shape[1] / float(fs)
if not np.allclose(audioLength, referenceLength, atol=tolerance):
logger.warning('Audio clock is incoherent: audio length is %f sec, but clock says %f sec' % (audioLength, referenceLength))
raw = np.array(raw.flatten(), dtype=np.float32)[:,np.newaxis] / np.iinfo('int16').max
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
if name == 'left':
title = 'Microphone (left)'
elif name == 'right':
title = 'Microphone (right)'
labels = ['Time [sec]', "Ampitude"]
ylim=[-1.0, 1.0]
legend = None
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processTemperature(dataset, outDirPath, fsVideo=None):
group = 'imu'
name = 'temperature'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
title = 'IMU temperature'
labels = ['Time [sec]', "Temperature [celcius]"]
ylim=[15.0, 50.0]
legend = None
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processPressure(dataset, outDirPath, fsVideo=None):
group = 'imu'
name = 'pressure'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
# Convert from Pa to kPa
raw = raw / 1000.0
title = 'Atmospheric pressure'
labels = ['Time [sec]', "Pressure [kPa]"]
normalPressure = 101.325 # Average sea-level pressure
variation = 3.386
#ylim=[normalPressure - variation, normalPressure + variation]
ylim=[np.min(raw), np.max(raw)]
legend = None
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processOdometryTwistAngular(dataset, outDirPath, fsVideo=None):
group = 'odometry'
name = 'twist_angular'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
title = 'Odometry angular velocity'
labels = ['Time [sec]', "Angular velocity [rad/s]"]
ylim=[-2.0, 2.0]
legend = ['x-axis', 'y-axis', 'z-axis']
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processOdometryTwistLinear(dataset, outDirPath, fsVideo=None):
group = 'odometry'
name = 'twist_linear'
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
windowSize = 2*fs
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
title = 'Odometry linear velocity'
labels = ['Time [sec]', "Linear velocity [m/s]"]
ylim=[-0.5, 0.5]
legend = ['x-axis', 'y-axis', 'z-axis']
exportSensorFramesAsVideo(raw, fs, outputVideoFile, title, labels, ylim, windowSize=int(2*fs), grid=False, legend=legend, fsVideo=fsVideo)
def processAudio(dataset, outDirPath, fs=16000, tolerance=0.25):
group = 'audio'
names = ['left', 'right']
data = []
for name in names:
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fps = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated packet sampling rate of %d Hz for %s (group: %s)' % (fps, name, group))
# Validate audio length
audioLength = raw.shape[0] * raw.shape[1] / float(fs)
referenceLength = clock[-1] + raw.shape[1] / float(fs)
if not np.allclose(audioLength, referenceLength, atol=tolerance):
logger.warning('Audio clock is incoherent: audio length is %f sec, but clock says %f sec' % (audioLength, referenceLength))
data.append(raw.flatten())
data = np.array(data, dtype=np.int16)
outputWavFile = os.path.abspath(os.path.join(outDirPath, '%s_left-right.wav' % (group)))
logger.info('Writing to output audio file %s' % (outputWavFile))
scipy.io.wavfile.write(outputWavFile, fs, data.T)
def processVideo(dataset, outDirPath):
group = 'video'
names = ['left', 'right']
for name in names:
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fps = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fps, name, group))
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
# Decode first frame to check image size
data = raw[0,:shape[0,0]]
img = cv2.imdecode(data, flags=1) # cv2.CV_LOAD_IMAGE_COLOR
height, width, layers = img.shape
# Initialize video writer based on image size
if is_cv2():
codec = cv2.cv.CV_FOURCC(*'XVID')
elif is_cv3():
codec = cv2.VideoWriter_fourcc(*'XVID')
writer = cv2.VideoWriter(outputVideoFile, codec, fps, (width, height))
# Process each frame
startTime = time.time()
nbFrames = len(raw)
for i in range(nbFrames):
data = raw[i,:shape[i,0]]
# Decode raw JPEG data into image
img = cv2.imdecode(data, flags=1) # cv2.CV_LOAD_IMAGE_COLOR
# Write frame
writer.write(img)
# Close video writer
writer.release()
elapsedTime = time.time() - startTime
logger.info('FPS = %f frame/sec' % (nbFrames/elapsedTime))
def processFlow(dataset, outDirPath, fsVideo=None):
group = 'flow'
names = ['left', 'right']
for name in names:
[_, _, raw, clock, shape] = dataset.getStates(name, group)
# Estimate sampling rate from clock
fs = int(np.round(1.0/np.mean(clock[1:] - clock[:-1])))
logger.info('Estimated sampling rate of %d Hz for %s (group: %s)' % (fs, name, group))
outputVideoFile = os.path.abspath(os.path.join(outDirPath, '%s_%s.avi' % (group, name)))
logger.info('Writing to output video file %s' % (outputVideoFile))
exportFlowFramesAsVideo(raw, fs, outputVideoFile, fsVideo=fsVideo)
def process(name, datasetPath, outDirPath, fsVideo=None):
with Hdf5Dataset(datasetPath, mode='r') as dataset:
if name == 'position':
processPosition(dataset, outDirPath, fsVideo)
elif name == 'orientation':
processOrientation(dataset, outDirPath, fsVideo)
elif name == 'orientation-raw':
processOrientation(dataset, outDirPath, fsVideo, rawValues=True)
elif name == 'motors':
processMotors(dataset, outDirPath, fsVideo)
elif name == 'video':
processVideo(dataset, outDirPath)
elif name == 'flow':
processFlow(dataset, outDirPath, fsVideo)
elif name == 'imu-accel':
processImu(dataset, outDirPath, name, fsVideo)
elif name == 'imu-gyro':
processImu(dataset, outDirPath, name, fsVideo)
elif name == 'imu-mag':
processImu(dataset, outDirPath, name, fsVideo)
elif name == 'imu-accel-raw':
processImuRaw(dataset, outDirPath, name, fsVideo)
elif name == 'imu-gyro-raw':
processImuRaw(dataset, outDirPath, name, fsVideo)
elif name == 'range':
processIRrange(dataset, outDirPath, fsVideo)
elif name == 'contact':
processContacts(dataset, outDirPath, name, fsVideo)
elif name == 'wheel-drop':
processContacts(dataset, outDirPath, name, fsVideo)
elif name == 'cliff':
processContacts(dataset, outDirPath, name, fsVideo)
elif name == 'battery':
processBattery(dataset, outDirPath, fsVideo)
elif name == 'battery-charge':
processBatteryCharge(dataset, outDirPath, fsVideo)
elif name == 'battery-percentage':
processBatteryPercentage(dataset, outDirPath, fsVideo)
elif name == 'battery-status':
processBatteryStatus(dataset, outDirPath, fsVideo)
elif name == 'audio':
processAudio(dataset, outDirPath)
elif name == 'audio-signal-left':
processAudioSignal(dataset, outDirPath, name, fsVideo=fsVideo)
elif name == 'audio-signal-right':
processAudioSignal(dataset, outDirPath, name, fsVideo=fsVideo)
elif name == 'twist_linear':
processOdometryTwistLinear(dataset, outDirPath, fsVideo)
elif name == 'twist_angular':
processOdometryTwistAngular(dataset, outDirPath, fsVideo)
elif name == 'imu-temperature':
processTemperature(dataset, outDirPath, fsVideo)
elif name == 'imu-pressure':
processPressure(dataset, outDirPath, fsVideo)
else:
raise Exception('Unknown name: %s' % (name))
def main(args=None):
parser = OptionParser()
parser.add_option("-i", "--input", dest="input", default=None,
help='specify the path of the input hdf5 dataset file')
parser.add_option("-o", "--output-dir", dest="outputDir", default='.',
help='specify the path of the output directory')
parser.add_option("-c", "--nb-processes", dest="nbProcesses", type='int', default=1,
help='Specify the number of parallel processes to spawn')
parser.add_option("-d", "--fs-video", dest="fsVideo", type='int', default=-1,
help='Specify the framerate (Hz) of the output videos')
parser.add_option("-t", "--sensors", dest="sensors", default=None,
help='Specify the sensors from which to export videos')
(options,args) = parser.parse_args(args=args)
datasetPath = os.path.abspath(options.input)
logger.info('Using input HDF5 dataset file: %s' % (datasetPath))
outDirPath = os.path.abspath(options.outputDir)
logger.info('Using output directory: %s' % (outDirPath))
if not os.path.exists(outDirPath):
logger.info('Creating output directory: %s' % (outDirPath))
os.makedirs(outDirPath)
if options.nbProcesses <= 0:
nbProcesses = multiprocessing.cpu_count()
else:
nbProcesses = options.nbProcesses
logger.info('Using a multiprocessing pool of %d processes' % (nbProcesses))
p = LoggingPool(processes=nbProcesses, maxtasksperchild=1)
logger.info('Using an output video framerate of %d Hz' % (options.fsVideo))
defaultNames = ['orientation-raw', 'imu-accel-raw', 'imu-gyro-raw', 'twist_linear', 'twist_angular','imu-temperature',
'position', 'audio-signal-left', 'audio-signal-right', 'orientation','battery',
'contact','cliff','wheel-drop','range', 'imu-accel', 'imu-gyro', 'imu-mag', 'imu-pressure',
'motors', 'video', 'flow', 'audio', 'battery-charge', 'battery-percentage', 'battery-status']
if options.sensors is not None:
names = str(options.sensors).split(',')
for name in names:
if name not in defaultNames:
raise Exception('Unknown sensor name: %s' % (name))
else:
names = defaultNames
logger.info('Exporting videos for these sensors: %s' % (','.join(names)))
fsVideo = options.fsVideo
if fsVideo <= 0:
fsVideo = None
for name in names:
p.apply_async(process, args=(name, datasetPath, outDirPath, fsVideo))
p.close()
p.join()
logger.info('All done.')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
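# Hedged CLI usage sketch (file names are placeholders, not from the original repo):
#   python visualize_hdf5.py -i recording.h5 -o ./videos -c 4 -d 15 -t imu-accel,video
# exports IMU-acceleration and camera videos from recording.h5 into ./videos using
# 4 worker processes and a 15 Hz output framerate.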
|
bsd-3-clause
|
rajat1994/scikit-learn
|
sklearn/ensemble/partial_dependence.py
|
251
|
15097
|
"""Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0]`` equals the product of the axis lengths
(at most ``grid_resolution ** X.shape[1]``).
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
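# Hedged usage sketch (not part of scikit-learn): build a grid over two synthetic
# features and check its basic shape; with 100 unique values per column and
# grid_resolution=5, the cartesian grid has 5 ** 2 = 25 rows.
def _demo_grid_from_X():
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(100, 2))
    grid, axes = _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=5)
    assert grid.shape == (25, 2) and len(axes) == 2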
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
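# Illustrative sketch (not part of the original module): how partial_dependence()
# above can be evaluated jointly for a pair of features.  The dataset, estimator
# settings and feature indices are arbitrary choices for demonstration only.
def _example_two_way_partial_dependence():
    from sklearn.datasets import make_friedman1
    from sklearn.ensemble import GradientBoostingRegressor
    X, y = make_friedman1(random_state=0)
    est = GradientBoostingRegressor(n_estimators=10, random_state=0).fit(X, y)
    # joint dependence of the model output on features 0 and 1
    pdp, axes = partial_dependence(est, (0, 1), X=X, grid_resolution=10)
    # pdp has shape (1, n_points); reshape it onto the two grid axes
    return pdp.reshape(len(axes[0]), len(axes[1])), axes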
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
        # if feature_names is not given, use feature indices as names
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
|
bsd-3-clause
|
4shadoww/hakkuframework
|
core/lib/scapy/layers/inet.py
|
2
|
131909
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
IPv4 (Internet Protocol v4).
"""
import os,time,struct,re,socket,types
from select import select
from collections import defaultdict
from scapy.utils import checksum,is_private_addr
from scapy.layers.l2 import *
from scapy.config import conf
from scapy.fields import *
from scapy.packet import *
from scapy.volatile import *
from scapy.sendrecv import sr,sr1,srp1
from scapy.plist import PacketList,SndRcvList
from scapy.automaton import Automaton,ATMT
import scapy.as_resolvers
####################
## IP Tools class ##
####################
class IPTools:
"""Add more powers to a class that have a "src" attribute."""
def whois(self):
os.system("whois %s" % self.src)
def ottl(self):
t = [32,64,128,255]+[self.ttl]
t.sort()
return t[t.index(self.ttl)+1]
def hops(self):
return self.ottl()-self.ttl-1
def is_priv_addr(self):
return is_private_addr(self.src)
_ip_options_names = { 0: "end_of_list",
1: "nop",
2: "security",
3: "loose_source_route",
4: "timestamp",
5: "extended_security",
6: "commercial_security",
7: "record_route",
8: "stream_id",
9: "strict_source_route",
10: "experimental_measurement",
11: "mtu_probe",
12: "mtu_reply",
13: "flow_control",
14: "access_control",
15: "encode",
16: "imi_traffic_descriptor",
17: "extended_IP",
18: "traceroute",
19: "address_extension",
20: "router_alert",
21: "selective_directed_broadcast_mode",
23: "dynamic_packet_state",
24: "upstream_multicast_packet",
25: "quick_start",
30: "rfc4727_experiment",
}
class _IPOption_HDR(Packet):
fields_desc = [ BitField("copy_flag",0, 1),
BitEnumField("optclass",0,2,{0:"control",2:"debug"}),
BitEnumField("option",0,5, _ip_options_names) ]
class IPOption(Packet):
name = "IP Option"
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B", # Only option 0 and 1 have no length and value
length_of="value", adjust=lambda pkt,l:l+2),
StrLenField("value", "",length_from=lambda pkt:pkt.length-2) ]
def extract_padding(self, p):
return b"",p
registered_ip_options = {}
@classmethod
def register_variant(cls):
cls.registered_ip_options[cls.option.default] = cls
@classmethod
def dispatch_hook(cls, pkt=None, *args, **kargs):
if pkt:
opt = pkt[0]&0x1f
if opt in cls.registered_ip_options:
return cls.registered_ip_options[opt]
return cls
class IPOption_EOL(IPOption):
name = "IP Option End of Options List"
option = 0
fields_desc = [ _IPOption_HDR ]
class IPOption_NOP(IPOption):
name = "IP Option No Operation"
option=1
fields_desc = [ _IPOption_HDR ]
class IPOption_Security(IPOption):
name = "IP Option Security"
copy_flag = 1
option = 2
fields_desc = [ _IPOption_HDR,
ByteField("length", 11),
ShortField("security",0),
ShortField("compartment",0),
ShortField("handling_restrictions",0),
StrFixedLenField("transmission_control_code","xxx",3),
]
class IPOption_LSRR(IPOption):
name = "IP Option Loose Source and Record Route"
copy_flag = 1
option = 3
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="routers", adjust=lambda pkt,l:l+3),
ByteField("pointer",4), # 4 is first IP
FieldListField("routers",[],IPField("","0.0.0.0"),
length_from=lambda pkt:pkt.length-3)
]
def get_current_router(self):
return self.routers[self.pointer//4-1]
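# Illustrative sketch (not part of the original module): attaching a Loose
# Source Route option to an IP header.  The addresses come from the RFC 5737
# documentation ranges and are placeholders only.
def _example_ip_lsrr_option():
    lsrr = IPOption_LSRR(routers=["192.0.2.1", "198.51.100.1"])
    pkt = IP(dst="203.0.113.5", options=[lsrr])
    # serializing fills in the option length plus the IP ihl/len/chksum fields
    return bytes(pkt)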
class IPOption_RR(IPOption_LSRR):
name = "IP Option Record Route"
option = 7
class IPOption_SSRR(IPOption_LSRR):
name = "IP Option Strict Source and Record Route"
option = 9
class IPOption_Stream_Id(IPOption):
name = "IP Option Stream ID"
option = 8
fields_desc = [ _IPOption_HDR,
ByteField("length", 4),
ShortField("security",0), ]
class IPOption_MTU_Probe(IPOption):
name = "IP Option MTU Probe"
option = 11
fields_desc = [ _IPOption_HDR,
ByteField("length", 4),
ShortField("mtu",0), ]
class IPOption_MTU_Reply(IPOption_MTU_Probe):
name = "IP Option MTU Reply"
option = 12
class IPOption_Traceroute(IPOption):
name = "IP Option Traceroute"
copy_flag = 1
option = 18
fields_desc = [ _IPOption_HDR,
ByteField("length", 12),
ShortField("id",0),
ShortField("outbound_hops",0),
ShortField("return_hops",0),
IPField("originator_ip","0.0.0.0") ]
class IPOption_Address_Extension(IPOption):
name = "IP Option Address Extension"
copy_flag = 1
option = 19
fields_desc = [ _IPOption_HDR,
ByteField("length", 10),
IPField("src_ext","0.0.0.0"),
IPField("dst_ext","0.0.0.0") ]
class IPOption_Router_Alert(IPOption):
name = "IP Option Router Alert"
copy_flag = 1
option = 20
fields_desc = [ _IPOption_HDR,
ByteField("length", 4),
ShortEnumField("alert",0, {0:"router_shall_examine_packet"}), ]
class IPOption_SDBM(IPOption):
name = "IP Option Selective Directed Broadcast Mode"
copy_flag = 1
option = 21
fields_desc = [ _IPOption_HDR,
FieldLenField("length", None, fmt="B",
length_of="addresses", adjust=lambda pkt,l:l+2),
FieldListField("addresses",[],IPField("","0.0.0.0"),
length_from=lambda pkt:pkt.length-2)
]
TCPOptions = (
{ 0 : ("EOL",None),
1 : ("NOP",None),
2 : ("MSS","!H"),
3 : ("WScale","!B"),
4 : ("SAckOK",None),
5 : ("SAck","!"),
8 : ("Timestamp","!II"),
14 : ("AltChkSum","!BH"),
15 : ("AltChkSumOpt",None),
25 : ("Mood","!p")
},
{ "EOL":0,
"NOP":1,
"MSS":2,
"WScale":3,
"SAckOK":4,
"SAck":5,
"Timestamp":8,
"AltChkSum":14,
"AltChkSumOpt":15,
"Mood":25
} )
class TCPOptionsField(StrField):
islist=1
def getfield(self, pkt, s):
opsz = (pkt.dataofs-5)*4
if opsz < 0:
warning("bad dataofs (%i). Assuming dataofs=5"%pkt.dataofs)
opsz = 0
return s[opsz:],self.m2i(pkt,s[:opsz])
def m2i(self, pkt, x):
opt = []
while x:
onum = x[0]
if onum == 0:
opt.append(("EOL",None))
x=x[1:]
break
if onum == 1:
opt.append(("NOP",None))
x=x[1:]
continue
olen = x[1]
if olen < 2:
warning("Malformed TCP option (announced length is %i)" % olen)
olen = 2
oval = x[2:olen]
if onum in TCPOptions[0]:
oname, ofmt = TCPOptions[0][onum]
if onum == 5: #SAck
ofmt += "%iI" % (len(oval)//4)
if ofmt and struct.calcsize(ofmt) == len(oval):
oval = struct.unpack(ofmt, oval)
if len(oval) == 1:
oval = oval[0]
opt.append((oname, oval))
else:
opt.append((onum, oval))
x = x[olen:]
return opt
def i2m(self, pkt, x):
opt = b""
for oname,oval in x:
if type(oname) is str:
if oname == "NOP":
opt += b"\x01"
continue
elif oname == "EOL":
opt += b"\x00"
continue
elif oname in TCPOptions[1]:
onum = TCPOptions[1][oname]
ofmt = TCPOptions[0][onum][1]
if onum == 5: #SAck
ofmt += "%iI" % len(oval)
if ofmt is not None and (type(oval) is not str or "s" in ofmt):
if type(oval) is not tuple:
oval = (oval,)
oval = struct.pack(ofmt, *oval)
else:
warning("option [%s] unknown. Skipped."%oname)
continue
else:
onum = oname
if type(oval) is not str:
warning("option [%i] is not string."%onum)
continue
opt += bytes([(onum), (2+len(oval))]) + oval
return opt+b"\x00"*(3-((len(opt)+3)%4))
def randval(self):
return [] # XXX
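# Illustrative sketch (not part of the original module): the option names
# accepted by TCPOptionsField are the keys of the TCPOptions tables above,
# and their values are packed with the struct formats listed there.  The
# destination and option values below are arbitrary.
def _example_tcp_syn_with_options():
    syn = TCP(dport=80, flags="S",
              options=[("MSS", 1460), ("SAckOK", b""), ("NOP", None),
                       ("WScale", 7), ("Timestamp", (12345, 0))])
    # wrap in IP so post_build() can compute the TCP checksum
    return bytes(IP(dst="192.0.2.1")/syn)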
class ICMPTimeStampField(IntField):
re_hmsm = re.compile("([0-2]?[0-9])[Hh:](([0-5]?[0-9])([Mm:]([0-5]?[0-9])([sS:.]([0-9]{0,3}))?)?)?$")
def i2repr(self, pkt, val):
if val is None:
return "--"
else:
sec, milli = divmod(val, 1000)
min, sec = divmod(sec, 60)
hour, min = divmod(min, 60)
return "%d:%d:%d.%d" %(hour, min, sec, int(milli))
def any2i(self, pkt, val):
if type(val) is str:
hmsms = self.re_hmsm.match(val)
if hmsms:
h,_,m,_,s,_,ms = hmsms = hmsms.groups()
ms = int(((ms or "")+"000")[:3])
val = ((int(h)*60+int(m or 0))*60+int(s or 0))*1000+ms
else:
val = 0
elif val is None:
val = int((time.time()%(24*60*60))*1000)
return val
class IP(Packet, IPTools):
name = "IP"
fields_desc = [ BitField("version" , 4 , 4),
BitField("ihl", None, 4),
XByteField("tos", 0),
ShortField("len", None),
ShortField("id", 1),
FlagsField("flags", 0, 3, ["MF","DF","evil"]),
BitField("frag", 0, 13),
ByteField("ttl", 64),
ByteEnumField("proto", 0, IP_PROTOS),
XShortField("chksum", None),
#IPField("src", "127.0.0.1"),
Emph(SourceIPField("src","dst")),
Emph(IPField("dst", "127.0.0.1")),
PacketListField("options", [], IPOption, length_from=lambda p:p.ihl*4-20) ]
def post_build(self, p, pay):
ihl = self.ihl
p += b"\0"*((-len(p))%4) # pad IP options if needed
if ihl is None:
ihl = len(p)//4
p = bytes([((self.version&0xf)<<4) | ihl&0x0f])+p[1:]
if self.len is None:
l = len(p)+len(pay)
p = p[:2]+struct.pack("!H", l)+p[4:]
if self.chksum is None:
ck = checksum(p)
p = p[:10]+bytes([ck>>8])+bytes([ck&0xff])+p[12:]
return p+pay
def extract_padding(self, s):
l = self.len - (self.ihl << 2)
return s[:l],s[l:]
def send(self, s, slp=0):
for p in self:
try:
s.sendto(bytes(p), (p.dst,0))
except socket.error as msg:
log_runtime.error(msg)
if slp:
time.sleep(slp)
def route(self):
dst = self.dst
if isinstance(dst,Gen):
dst = next(iter(dst))
return conf.route.route(dst)
def hashret(self):
if ( (self.proto == socket.IPPROTO_ICMP)
and (isinstance(self.payload, ICMP))
and (self.payload.type in [3,4,5,11,12]) ):
return self.payload.payload.hashret()
else:
if conf.checkIPsrc and conf.checkIPaddr:
return strxor(inet_aton(self.src),inet_aton(self.dst))+struct.pack("B",self.proto)+self.payload.hashret()
else:
return struct.pack("B", self.proto)+self.payload.hashret()
def answers(self, other):
if not isinstance(other,IP):
return 0
if conf.checkIPaddr and (self.dst != other.src):
return 0
if ( (self.proto == socket.IPPROTO_ICMP) and
(isinstance(self.payload, ICMP)) and
(self.payload.type in [3,4,5,11,12]) ):
# ICMP error message
return self.payload.payload.answers(other)
else:
if ( (conf.checkIPaddr and (self.src != other.dst)) or
(self.proto != other.proto) ):
return 0
return self.payload.answers(other.payload)
def mysummary(self):
s = self.sprintf("%IP.src% > %IP.dst% %IP.proto%")
if self.frag:
s += " frag:%i" % self.frag
return s
def fragment(self, fragsize=1480):
"""Fragment IP datagrams"""
fragsize = (fragsize+7)//8*8
lst = []
fnb = 0
fl = self
while fl.underlayer is not None:
fnb += 1
fl = fl.underlayer
for p in fl:
s = bytes(p[fnb].payload)
nb = (len(s)+fragsize-1)//fragsize
for i in range(nb):
q = p.copy()
del(q[fnb].payload)
del(q[fnb].chksum)
del(q[fnb].len)
if i == nb-1:
q[IP].flags &= ~1
else:
q[IP].flags |= 1
q[IP].frag = i*fragsize//8
r = conf.raw_layer(load=s[i*fragsize:(i+1)*fragsize])
r.overload_fields = p[IP].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst
class TCP(Packet):
name = "TCP"
fields_desc = [ ShortEnumField("sport", 20, TCP_SERVICES),
ShortEnumField("dport", 80, TCP_SERVICES),
IntField("seq", 0),
IntField("ack", 0),
BitField("dataofs", None, 4),
BitField("reserved", 0, 4),
FlagsField("flags", 0x2, 8, "FSRPAUEC"),
ShortField("window", 8192),
XShortField("chksum", None),
ShortField("urgptr", 0),
TCPOptionsField("options", {}) ]
def post_build(self, p, pay):
p += pay
dataofs = self.dataofs
if dataofs is None:
dataofs = 5+((len(self.get_field("options").i2m(self,self.options))+3)//4)
p = p[:12]+bytes([(dataofs << 4) | (p[12])&0x0f])+p[13:]
if self.chksum is None:
if isinstance(self.underlayer, IP):
if self.underlayer.len is not None:
ln = self.underlayer.len-20
else:
ln = len(p)
psdhdr = struct.pack("!4s4sHH",
inet_aton(self.underlayer.src),
inet_aton(self.underlayer.dst),
self.underlayer.proto,
ln)
ck=checksum(psdhdr+p)
p = p[:16]+struct.pack("!H", ck)+p[18:]
elif conf.ipv6_enabled and isinstance(self.underlayer, scapy.layers.inet6.IPv6) or isinstance(self.underlayer, scapy.layers.inet6._IPv6ExtHdr):
ck = scapy.layers.inet6.in6_chksum(socket.IPPROTO_TCP, self.underlayer, p)
p = p[:16]+struct.pack("!H", ck)+p[18:]
else:
warning("No IP underlayer to compute checksum. Leaving null.")
return p
def hashret(self):
if conf.checkIPsrc:
return struct.pack("H",self.sport ^ self.dport)+self.payload.hashret()
else:
return self.payload.hashret()
def answers(self, other):
if not isinstance(other, TCP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.dport) and
(self.dport == other.sport)):
return 0
if (abs(other.seq-self.ack) > 2+len(other.payload)):
return 0
return 1
def mysummary(self):
if isinstance(self.underlayer, IP):
return self.underlayer.sprintf("TCP %IP.src%:%TCP.sport% > %IP.dst%:%TCP.dport% %TCP.flags%")
elif conf.ipv6_enabled and isinstance(self.underlayer, scapy.layers.inet6.IPv6):
return self.underlayer.sprintf("TCP %IPv6.src%:%TCP.sport% > %IPv6.dst%:%TCP.dport% %TCP.flags%")
else:
return self.sprintf("TCP %TCP.sport% > %TCP.dport% %TCP.flags%")
class UDP(Packet):
name = "UDP"
fields_desc = [ ShortEnumField("sport", 53, UDP_SERVICES),
ShortEnumField("dport", 53, UDP_SERVICES),
ShortField("len", None),
XShortField("chksum", None), ]
def post_build(self, p, pay):
p += pay
l = self.len
if l is None:
l = len(p)
p = p[:4]+struct.pack("!H",l)+p[6:]
if self.chksum is None:
if isinstance(self.underlayer, IP):
if self.underlayer.len is not None:
ln = self.underlayer.len-20
else:
ln = len(p)
psdhdr = struct.pack("!4s4sHH",
inet_aton(self.underlayer.src),
inet_aton(self.underlayer.dst),
self.underlayer.proto,
ln)
ck=checksum(psdhdr+p)
p = p[:6]+struct.pack("!H", ck)+p[8:]
elif isinstance(self.underlayer, scapy.layers.inet6.IPv6) or isinstance(self.underlayer, scapy.layers.inet6._IPv6ExtHdr):
ck = scapy.layers.inet6.in6_chksum(socket.IPPROTO_UDP, self.underlayer, p)
p = p[:6]+struct.pack("!H", ck)+p[8:]
else:
warning("No IP underlayer to compute checksum. Leaving null.")
return p
def extract_padding(self, s):
l = self.len - 8
return s[:l],s[l:]
def hashret(self):
return self.payload.hashret()
def answers(self, other):
if not isinstance(other, UDP):
return 0
if conf.checkIPsrc:
if self.dport != other.sport:
return 0
return self.payload.answers(other.payload)
def mysummary(self):
if isinstance(self.underlayer, IP):
return self.underlayer.sprintf("UDP %IP.src%:%UDP.sport% > %IP.dst%:%UDP.dport%")
elif isinstance(self.underlayer, scapy.layers.inet6.IPv6):
return self.underlayer.sprintf("UDP %IPv6.src%:%UDP.sport% > %IPv6.dst%:%UDP.dport%")
else:
return self.sprintf("UDP %UDP.sport% > %UDP.dport%")
icmptypes = { 0 : "echo-reply",
3 : "dest-unreach",
4 : "source-quench",
5 : "redirect",
8 : "echo-request",
9 : "router-advertisement",
10 : "router-solicitation",
11 : "time-exceeded",
12 : "parameter-problem",
13 : "timestamp-request",
14 : "timestamp-reply",
15 : "information-request",
16 : "information-response",
17 : "address-mask-request",
18 : "address-mask-reply" }
icmpcodes = { 3 : { 0 : "network-unreachable",
1 : "host-unreachable",
2 : "protocol-unreachable",
3 : "port-unreachable",
4 : "fragmentation-needed",
5 : "source-route-failed",
6 : "network-unknown",
7 : "host-unknown",
9 : "network-prohibited",
10 : "host-prohibited",
11 : "TOS-network-unreachable",
12 : "TOS-host-unreachable",
13 : "communication-prohibited",
14 : "host-precedence-violation",
15 : "precedence-cutoff", },
5 : { 0 : "network-redirect",
1 : "host-redirect",
2 : "TOS-network-redirect",
3 : "TOS-host-redirect", },
11 : { 0 : "ttl-zero-during-transit",
1 : "ttl-zero-during-reassembly", },
12 : { 0 : "ip-header-bad",
1 : "required-option-missing", }, }
class ICMP(Packet):
name = "ICMP"
fields_desc = [ ByteEnumField("type",8, icmptypes),
MultiEnumField("code",0, icmpcodes, depends_on=lambda pkt:pkt.type,fmt="B"),
XShortField("chksum", None),
ConditionalField(XShortField("id",0), lambda pkt:pkt.type in [0,8,13,14,15,16,17,18]),
ConditionalField(XShortField("seq",0), lambda pkt:pkt.type in [0,8,13,14,15,16,17,18]),
ConditionalField(ICMPTimeStampField("ts_ori", None), lambda pkt:pkt.type in [13,14]),
ConditionalField(ICMPTimeStampField("ts_rx", None), lambda pkt:pkt.type in [13,14]),
ConditionalField(ICMPTimeStampField("ts_tx", None), lambda pkt:pkt.type in [13,14]),
ConditionalField(IPField("gw","0.0.0.0"), lambda pkt:pkt.type==5),
ConditionalField(ByteField("ptr",0), lambda pkt:pkt.type==12),
ConditionalField(X3BytesField("reserved",0), lambda pkt:pkt.type==12),
ConditionalField(IPField("addr_mask","0.0.0.0"), lambda pkt:pkt.type in [17,18]),
ConditionalField(IntField("unused",0), lambda pkt:pkt.type not in [0,5,8,12,13,14,15,16,17,18]),
]
def post_build(self, p, pay):
p += pay
if self.chksum is None:
ck = checksum(p)
p = p[:2]+bytes([ck>>8, ck&0xff])+p[4:]
return p
def hashret(self):
if self.type in [0,8,13,14,15,16,17,18]:
return struct.pack("HH",self.id,self.seq)+self.payload.hashret()
return self.payload.hashret()
def answers(self, other):
if not isinstance(other,ICMP):
return 0
if ( (other.type,self.type) in [(8,0),(13,14),(15,16),(17,18)] and
self.id == other.id and
self.seq == other.seq ):
return 1
return 0
def guess_payload_class(self, payload):
if self.type in [3,4,5,11,12]:
return IPerror
else:
return None
def mysummary(self):
if isinstance(self.underlayer, IP):
return self.underlayer.sprintf("ICMP %IP.src% > %IP.dst% %ICMP.type% %ICMP.code%")
else:
return self.sprintf("ICMP %ICMP.type% %ICMP.code%")
class IPerror(IP):
name = "IP in ICMP"
def answers(self, other):
if not isinstance(other, IP):
return 0
if not ( ((conf.checkIPsrc == 0) or (self.dst == other.dst)) and
(self.src == other.src) and
( ((conf.checkIPID == 0)
or (self.id == other.id)
or (conf.checkIPID == 1 and self.id == socket.htons(other.id)))) and
(self.proto == other.proto) ):
return 0
return self.payload.answers(other.payload)
def mysummary(self):
return Packet.mysummary(self)
class TCPerror(TCP):
fields_desc = [ ShortEnumField("sport", 20, TCP_SERVICES),
ShortEnumField("dport", 80, TCP_SERVICES),
IntField("seq", 0) ]
name = "TCP in ICMP"
def post_build(self, p, pay):
p += pay
return p
def answers(self, other):
if not isinstance(other, TCP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.sport) and
(self.dport == other.dport)):
return 0
if conf.check_TCPerror_seqack:
if self.seq is not None:
if self.seq != other.seq:
return 0
if self.ack is not None:
if self.ack != other.ack:
return 0
return 1
def mysummary(self):
return Packet.mysummary(self)
class UDPerror(UDP):
name = "UDP in ICMP"
def answers(self, other):
if not isinstance(other, UDP):
return 0
if conf.checkIPsrc:
if not ((self.sport == other.sport) and
(self.dport == other.dport)):
return 0
return 1
def mysummary(self):
return Packet.mysummary(self)
class ICMPerror(ICMP):
name = "ICMP in ICMP"
def answers(self, other):
if not isinstance(other,ICMP):
return 0
if not ((self.type == other.type) and
(self.code == other.code)):
return 0
if self.code in [0,8,13,14,17,18]:
if (self.id == other.id and
self.seq == other.seq):
return 1
else:
return 0
else:
return 1
def mysummary(self):
return Packet.mysummary(self)
bind_layers( Ether, IP, type=2048)
bind_layers( CookedLinux, IP, proto=2048)
bind_layers( GRE, IP, proto=2048)
bind_layers( SNAP, IP, code=2048)
bind_layers( IPerror, IPerror, frag=0, proto=4)
bind_layers( IPerror, ICMPerror, frag=0, proto=1)
bind_layers( IPerror, TCPerror, frag=0, proto=6)
bind_layers( IPerror, UDPerror, frag=0, proto=17)
bind_layers( IP, IP, frag=0, proto=4)
bind_layers( IP, ICMP, frag=0, proto=1)
bind_layers( IP, TCP, frag=0, proto=6)
bind_layers( IP, UDP, frag=0, proto=17)
bind_layers( IP, GRE, frag=0, proto=47)
conf.l2types.register(101, IP)
conf.l2types.register_num2layer(12, IP)
conf.l3types.register(ETH_P_IP, IP)
conf.l3types.register_num2layer(ETH_P_ALL, IP)
conf.neighbor.register_l3(Ether, IP, lambda l2,l3: getmacbyip(l3.dst))
conf.neighbor.register_l3(Dot3, IP, lambda l2,l3: getmacbyip(l3.dst))
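# Illustrative sketch (not part of the original module): thanks to the
# bind_layers() calls above, a serialized packet is dissected back into the
# same layer stack.  The address and port are placeholders.
def _example_build_and_dissect():
    pkt = IP(dst="198.51.100.7")/TCP(dport=443, flags="S")
    raw = bytes(pkt)        # fills in lengths and checksums
    decoded = IP(raw)       # proto=6 maps back to TCP via bind_layers
    return decoded[TCP].dport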
###################
## Fragmentation ##
###################
@conf.commands.register
def fragment(pkt, fragsize=1480):
"""Fragment a big IP datagram"""
fragsize = (fragsize+7)//8*8
lst = []
for p in pkt:
s = bytes(p[IP].payload)
nb = (len(s)+fragsize-1)//fragsize
for i in range(nb):
q = p.copy()
del(q[IP].payload)
del(q[IP].chksum)
del(q[IP].len)
if i == nb-1:
q[IP].flags &= ~1
else:
q[IP].flags |= 1
q[IP].frag = i*fragsize//8
r = conf.raw_layer(load=s[i*fragsize:(i+1)*fragsize])
r.overload_fields = p[IP].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst
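# Illustrative sketch (not part of the original module): splitting a large
# datagram with fragment() above.  The payload size and addresses are
# arbitrary.
def _example_fragment_usage():
    big = IP(dst="192.0.2.1")/UDP(dport=53)/("X"*3000)
    frags = fragment(big, fragsize=500)   # fragsize is rounded up to a multiple of 8
    # every fragment except the last one carries the MF flag
    return [(f[IP].frag, int(f[IP].flags)) for f in frags]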
def overlap_frag(p, overlap, fragsize=8, overlap_fragsize=None):
if overlap_fragsize is None:
overlap_fragsize = fragsize
q = p.copy()
del(q[IP].payload)
q[IP].add_payload(overlap)
qfrag = fragment(q, overlap_fragsize)
qfrag[-1][IP].flags |= 1
return qfrag+fragment(p, fragsize)
@conf.commands.register
def defrag(plist):
"""defrag(plist) -> ([not fragmented], [defragmented],
[ [bad fragments], [bad fragments], ... ])"""
frags = defaultdict(PacketList)
nofrag = PacketList()
    for p in plist:
        if IP not in p:
            nofrag.append(p)
            continue
        ip = p[IP]
if ip.frag == 0 and ip.flags & 1 == 0:
nofrag.append(p)
continue
uniq = (ip.id,ip.src,ip.dst,ip.proto)
frags[uniq].append(p)
defrag = []
missfrag = []
for lst in frags.values():
lst.sort(key=lambda x: x.frag)
p = lst[0]
lastp = lst[-1]
if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing
missfrag.append(lst)
continue
p = p.copy()
if conf.padding_layer in p:
del(p[conf.padding_layer].underlayer.payload)
ip = p[IP]
if ip.len is None or ip.ihl is None:
clen = len(ip.payload)
else:
clen = ip.len - (ip.ihl<<2)
txt = conf.raw_layer()
for q in lst[1:]:
if clen != q.frag<<3: # Wrong fragmentation offset
if clen > q.frag<<3:
warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag<<3, p,txt,q))
missfrag.append(lst)
break
if q[IP].len is None or q[IP].ihl is None:
clen += len(q[IP].payload)
else:
clen += q[IP].len - (q[IP].ihl<<2)
if conf.padding_layer in q:
del(q[conf.padding_layer].underlayer.payload)
txt.add_payload(q[IP].payload.copy())
else:
ip.flags &= ~1 # !MF
del(ip.chksum)
del(ip.len)
p = p/txt
defrag.append(p)
defrag2=PacketList()
for p in defrag:
defrag2.append(p.__class__(bytes(p)))
return nofrag,defrag2,missfrag
@conf.commands.register
def defragment(plist):
"""defragment(plist) -> plist defragmented as much as possible """
frags = defaultdict(lambda:[])
final = []
pos = 0
for p in plist:
p._defrag_pos = pos
pos += 1
        if IP in p:
            ip = p[IP]
            if ip.frag != 0 or ip.flags & 1:
                uniq = (ip.id,ip.src,ip.dst,ip.proto)
frags[uniq].append(p)
continue
final.append(p)
defrag = []
missfrag = []
for lst in frags.values():
lst.sort(key=lambda x: x.frag)
p = lst[0]
lastp = lst[-1]
if p.frag > 0 or lastp.flags & 1 != 0: # first or last fragment missing
missfrag += lst
continue
p = p.copy()
if conf.padding_layer in p:
del(p[conf.padding_layer].underlayer.payload)
ip = p[IP]
if ip.len is None or ip.ihl is None:
clen = len(ip.payload)
else:
clen = ip.len - (ip.ihl<<2)
txt = conf.raw_layer()
for q in lst[1:]:
if clen != q.frag<<3: # Wrong fragmentation offset
if clen > q.frag<<3:
warning("Fragment overlap (%i > %i) %r || %r || %r" % (clen, q.frag<<3, p,txt,q))
missfrag += lst
break
if q[IP].len is None or q[IP].ihl is None:
clen += len(q[IP].payload)
else:
clen += q[IP].len - (q[IP].ihl<<2)
if conf.padding_layer in q:
del(q[conf.padding_layer].underlayer.payload)
txt.add_payload(q[IP].payload.copy())
else:
ip.flags &= ~1 # !MF
del(ip.chksum)
del(ip.len)
p = p/txt
p._defrag_pos = max(x._defrag_pos for x in lst)
defrag.append(p)
defrag2=[]
for p in defrag:
q = p.__class__(bytes(p))
q._defrag_pos = p._defrag_pos
defrag2.append(q)
final += defrag2
final += missfrag
final.sort(key=lambda x: x._defrag_pos)
for p in final:
del(p._defrag_pos)
if hasattr(plist, "listname"):
name = "Defragmented %s" % plist.listname
else:
name = "Defragmented"
return PacketList(final, name=name)
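# Illustrative sketch (not part of the original module): round-tripping a
# datagram through fragment() and defragment() above.  The payload and
# address are placeholders.
def _example_defragment_usage():
    frags = fragment(IP(dst="192.0.2.1")/UDP(dport=53)/("X"*2000), fragsize=500)
    rebuilt = defragment(frags)       # PacketList with fragments reassembled
    return len(frags), len(rebuilt)   # e.g. 4 fragments in, 1 datagram out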
### Add timeskew_graph() method to PacketList
def _packetlist_timeskew_graph(self, ip, **kargs):
"""Tries to graph the timeskew between the timestamps and real time for a given ip"""
res = map(lambda x: self._elt2pkt(x), self.res)
b = filter(lambda x:x.haslayer(IP) and x.getlayer(IP).src == ip and x.haslayer(TCP), res)
c = []
for p in b:
opts = p.getlayer(TCP).options
for o in opts:
if o[0] == "Timestamp":
c.append((p.time,o[1][0]))
if not c:
warning("No timestamps found in packet list")
return
#d = map(lambda (x,y): (x%2000,((x-c[0][0])-((y-c[0][1])/1000.0))),c)
d = map(lambda a: (a[0]%2000,((a[0]-c[0][0])-((a[1]-c[0][1])/1000.0))),c)
return plt.plot(d, **kargs)
#PacketList.timeskew_graph = types.MethodType(_packetlist_timeskew_graph, None)
### Create a new packet list
class TracerouteResult(SndRcvList):
def __init__(self, res=None, name="Traceroute", stats=None):
PacketList.__init__(self, res, name, stats, vector_index = 1)
self.graphdef = None
self.graphASres = 0
self.padding = 0
self.hloc = None
self.nloc = None
def show(self):
#return self.make_table(lambda (s,r): (s.sprintf("%IP.dst%:{TCP:tcp%ir,TCP.dport%}{UDP:udp%ir,UDP.dport%}{ICMP:ICMP}"),
return self.make_table(lambda s,r: (s.sprintf("%IP.dst%:{TCP:tcp%ir,TCP.dport%}{UDP:udp%ir,UDP.dport%}{ICMP:ICMP}"),
s.ttl,
r.sprintf("%-15s,IP.src% {TCP:%TCP.flags%}{ICMP:%ir,ICMP.type%}")))
def get_trace(self):
raw_trace = {}
for s,r in self.res:
if IP not in s:
continue
d = s[IP].dst
if d not in raw_trace:
raw_trace[d] = {}
raw_trace[d][s[IP].ttl] = r[IP].src, ICMP not in r
trace = {}
for k in raw_trace.keys():
m = [ x for x in raw_trace[k].keys() if raw_trace[k][x][1] ]
if not m:
trace[k] = raw_trace[k]
else:
m = min(m)
trace[k] = {i: raw_trace[k][i] for i in raw_trace[k].keys() if not raw_trace[k][i][1] or i<=m}
return trace
def trace3D(self):
"""Give a 3D representation of the traceroute.
right button: rotate the scene
middle button: zoom
left button: move the scene
left button on a ball: toggle IP displaying
ctrl-left button on a ball: scan ports 21,22,23,25,80 and 443 and display the result"""
trace = self.get_trace()
import visual
class IPsphere(visual.sphere):
def __init__(self, ip, **kargs):
visual.sphere.__init__(self, **kargs)
self.ip=ip
self.label=None
self.setlabel(self.ip)
def setlabel(self, txt,visible=None):
if self.label is not None:
if visible is None:
visible = self.label.visible
self.label.visible = 0
elif visible is None:
visible=0
self.label=visual.label(text=txt, pos=self.pos, space=self.radius, xoffset=10, yoffset=20, visible=visible)
def action(self):
self.label.visible ^= 1
visual.scene = visual.display()
visual.scene.exit = True
start = visual.box()
rings={}
tr3d = {}
for i in trace:
tr = trace[i]
tr3d[i] = []
ttl = tr.keys()
for t in range(1,max(ttl)+1):
if t not in rings:
rings[t] = []
if t in tr:
if tr[t] not in rings[t]:
rings[t].append(tr[t])
tr3d[i].append(rings[t].index(tr[t]))
else:
rings[t].append(("unk",-1))
tr3d[i].append(len(rings[t])-1)
for t in rings:
r = rings[t]
l = len(r)
for i in range(l):
if r[i][1] == -1:
col = (0.75,0.75,0.75)
elif r[i][1]:
col = visual.color.green
else:
col = visual.color.blue
s = IPsphere(pos=((l-1)*visual.cos(2*i*visual.pi/l),(l-1)*visual.sin(2*i*visual.pi/l),2*t),
ip = r[i][0],
color = col)
for trlst in tr3d.values():
if t <= len(trlst):
if trlst[t-1] == i:
trlst[t-1] = s
forecol = colgen(0.625, 0.4375, 0.25, 0.125)
for trlst in tr3d.values():
col = next(forecol)
start = (0,0,0)
for ip in trlst:
visual.cylinder(pos=start,axis=ip.pos-start,color=col,radius=0.2)
start = ip.pos
movcenter=None
while 1:
visual.rate(50)
if visual.scene.kb.keys:
k = visual.scene.kb.getkey()
if k == "esc" or k == "q":
break
if visual.scene.mouse.events:
ev = visual.scene.mouse.getevent()
if ev.press == "left":
o = ev.pick
if o:
if ev.ctrl:
if o.ip == "unk":
continue
savcolor = o.color
o.color = (1,0,0)
a,b=sr(IP(dst=o.ip)/TCP(dport=[21,22,23,25,80,443]),timeout=2)
o.color = savcolor
if len(a) == 0:
txt = "%s:\nno results" % o.ip
else:
txt = "%s:\n" % o.ip
for s,r in a:
txt += r.sprintf("{TCP:%IP.src%:%TCP.sport% %TCP.flags%}{TCPerror:%IPerror.dst%:%TCPerror.dport% %IP.src% %ir,ICMP.type%}\n")
o.setlabel(txt, visible=1)
else:
if hasattr(o, "action"):
o.action()
elif ev.drag == "left":
movcenter = ev.pos
elif ev.drop == "left":
movcenter = None
if movcenter:
visual.scene.center -= visual.scene.mouse.pos-movcenter
movcenter = visual.scene.mouse.pos
## world_trace needs to be reimplemented as gnuplot dependency is removed
# def world_trace(self):
# from modules.geo import locate_ip
# ips = {}
# rt = {}
# ports_done = {}
# for s,r in self.res:
# ips[r.src] = None
# if s.haslayer(TCP) or s.haslayer(UDP):
# trace_id = (s.src,s.dst,s.proto,s.dport)
# elif s.haslayer(ICMP):
# trace_id = (s.src,s.dst,s.proto,s.type)
# else:
# trace_id = (s.src,s.dst,s.proto,0)
# trace = rt.get(trace_id,{})
# if not r.haslayer(ICMP) or r.type != 11:
# if trace_id in ports_done:
# continue
# ports_done[trace_id] = None
# trace[s.ttl] = r.src
# rt[trace_id] = trace
#
# trt = {}
# for trace_id in rt:
# trace = rt[trace_id]
# loctrace = []
# for i in range(max(trace.keys())):
# ip = trace.get(i,None)
# if ip is None:
# continue
# loc = locate_ip(ip)
# if loc is None:
# continue
## loctrace.append((ip,loc)) # no labels yet
# loctrace.append(loc)
# if loctrace:
# trt[trace_id] = loctrace
#
# tr = map(lambda x: Gnuplot.Data(x,with_="lines"), trt.values())
# g = Gnuplot.Gnuplot()
# world = Gnuplot.File(conf.gnuplot_world,with_="lines")
# g.plot(world,*tr)
# return g
def make_graph(self,ASres=None,padding=0):
if ASres is None:
ASres = conf.AS_resolver
self.graphASres = ASres
self.graphpadding = padding
ips = {}
rt = {}
ports = {}
ports_done = {}
for s,r in self.res:
r = r.getlayer(IP) or (conf.ipv6_enabled and r[scapy.layers.inet6.IPv6]) or r
s = s.getlayer(IP) or (conf.ipv6_enabled and s[scapy.layers.inet6.IPv6]) or s
ips[r.src] = None
if TCP in s:
trace_id = (s.src,s.dst,6,s.dport)
elif UDP in s:
trace_id = (s.src,s.dst,17,s.dport)
elif ICMP in s:
trace_id = (s.src,s.dst,1,s.type)
else:
trace_id = (s.src,s.dst,s.proto,0)
trace = rt.get(trace_id,{})
ttl = conf.ipv6_enabled and scapy.layers.inet6.IPv6 in s and s.hlim or s.ttl
if not (ICMP in r and r[ICMP].type == 11) and not (conf.ipv6_enabled and scapy.layers.inet6.IPv6 in r and scapy.layers.inet6.ICMPv6TimeExceeded in r):
if trace_id in ports_done:
continue
ports_done[trace_id] = None
p = ports.get(r.src,[])
if TCP in r:
p.append(r.sprintf("<T%ir,TCP.sport%> %TCP.sport% %TCP.flags%"))
trace[ttl] = r.sprintf('"%r,src%":T%ir,TCP.sport%')
elif UDP in r:
p.append(r.sprintf("<U%ir,UDP.sport%> %UDP.sport%"))
trace[ttl] = r.sprintf('"%r,src%":U%ir,UDP.sport%')
elif ICMP in r:
p.append(r.sprintf("<I%ir,ICMP.type%> ICMP %ICMP.type%"))
trace[ttl] = r.sprintf('"%r,src%":I%ir,ICMP.type%')
else:
p.append(r.sprintf("{IP:<P%ir,proto%> IP %proto%}{IPv6:<P%ir,nh%> IPv6 %nh%}"))
trace[ttl] = r.sprintf('"%r,src%":{IP:P%ir,proto%}{IPv6:P%ir,nh%}')
ports[r.src] = p
else:
trace[ttl] = r.sprintf('"%r,src%"')
rt[trace_id] = trace
# Fill holes with unk%i nodes
unknown_label = incremental_label("unk%i")
blackholes = []
bhip = {}
for rtk in rt:
trace = rt[rtk]
k = trace.keys()
for n in range(min(k), max(k)):
if not n in trace:
trace[n] = next(unknown_label)
if not rtk in ports_done:
if rtk[2] == 1: #ICMP
bh = "%s %i/icmp" % (rtk[1],rtk[3])
elif rtk[2] == 6: #TCP
bh = "%s %i/tcp" % (rtk[1],rtk[3])
elif rtk[2] == 17: #UDP
bh = '%s %i/udp' % (rtk[1],rtk[3])
else:
bh = '%s %i/proto' % (rtk[1],rtk[2])
ips[bh] = None
bhip[rtk[1]] = bh
bh = '"%s"' % bh
trace[max(k)+1] = bh
blackholes.append(bh)
# Find AS numbers
ASN_query_list = dict.fromkeys(map(lambda x:x.rsplit(" ",1)[0],ips)).keys()
if ASres is None:
ASNlist = []
else:
ASNlist = ASres.resolve(*ASN_query_list)
ASNs = {}
ASDs = {}
for ip,asn,desc, in ASNlist:
if asn is None:
continue
iplist = ASNs.get(asn,[])
if ip in bhip:
if ip in ports:
iplist.append(ip)
iplist.append(bhip[ip])
else:
iplist.append(ip)
ASNs[asn] = iplist
ASDs[asn] = desc
backcolorlist=colgen("60","86","ba","ff")
forecolorlist=colgen("a0","70","40","20")
s = "digraph trace {\n"
s += "\n\tnode [shape=ellipse,color=black,style=solid];\n\n"
s += "\n#ASN clustering\n"
for asn in ASNs:
s += '\tsubgraph cluster_%s {\n' % asn
col = next(backcolorlist)
s += '\t\tcolor="#%s%s%s";' % col
s += '\t\tnode [fillcolor="#%s%s%s",style=filled];' % col
s += '\t\tfontsize = 10;'
s += '\t\tlabel = "%s\\n[%s]"\n' % (asn,ASDs[asn])
for ip in ASNs[asn]:
s += '\t\t"%s";\n'%ip
s += "\t}\n"
s += "#endpoints\n"
for p in ports:
s += '\t"%s" [shape=record,color=black,fillcolor=green,style=filled,label="%s|%s"];\n' % (p,p,"|".join(ports[p]))
s += "\n#Blackholes\n"
for bh in blackholes:
s += '\t%s [shape=octagon,color=black,fillcolor=red,style=filled];\n' % bh
if padding:
s += "\n#Padding\n"
pad={}
for snd,rcv in self.res:
if rcv.src not in ports and rcv.haslayer(conf.padding_layer):
p = rcv.getlayer(conf.padding_layer).load
if p != "\x00"*len(p):
pad[rcv.src]=None
for rcv in pad:
s += '\t"%s" [shape=triangle,color=black,fillcolor=red,style=filled];\n' % rcv
s += "\n\tnode [shape=ellipse,color=black,style=solid];\n\n"
for rtk in rt:
s += "#---[%s\n" % repr(rtk)
s += '\t\tedge [color="#%s%s%s"];\n' % next(forecolorlist)
trace = rt[rtk]
k = trace.keys()
for n in range(min(k), max(k)):
s += '\t%s ->\n' % trace[n]
s += '\t%s;\n' % trace[max(k)]
s += "}\n";
self.graphdef = s
def graph(self, ASres=None, padding=0, **kargs):
"""x.graph(ASres=conf.AS_resolver, other args):
ASres=None : no AS resolver => no clustering
ASres=AS_resolver() : default whois AS resolver (riswhois.ripe.net)
ASres=AS_resolver_cymru(): use whois.cymru.com whois database
ASres=AS_resolver(server="whois.ra.net")
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
figsize: w,h tuple in inches. See matplotlib documentation
target: filename. If None uses matplotlib to display
prog: which graphviz program to use"""
if ASres is None:
ASres = conf.AS_resolver
if (self.graphdef is None or
self.graphASres != ASres or
self.graphpadding != padding):
self.make_graph(ASres,padding)
return do_graph(self.graphdef, **kargs)
@conf.commands.register
def traceroute(target, dport=80, minttl=1, maxttl=30, sport=RandShort(), l4 = None, filter=None, timeout=2, verbose=None, **kargs):
"""Instant TCP traceroute
traceroute(target, [maxttl=30,] [dport=80,] [sport=80,] [verbose=conf.verb]) -> None
"""
if verbose is None:
verbose = conf.verb
if filter is None:
# we only consider ICMP error packets and TCP packets with at
# least the ACK flag set *and* either the SYN or the RST flag
# set
filter="(icmp and (icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12)) or (tcp and (tcp[13] & 0x16 > 0x10))"
if l4 is None:
a,b = sr(IP(dst=target, id=RandShort(), ttl=(minttl,maxttl))/TCP(seq=RandInt(),sport=sport, dport=dport),
timeout=timeout, filter=filter, verbose=verbose, **kargs)
else:
# this should always work
filter="ip"
a,b = sr(IP(dst=target, id=RandShort(), ttl=(minttl,maxttl))/l4,
timeout=timeout, filter=filter, verbose=verbose, **kargs)
a = TracerouteResult(a.res)
if verbose:
a.show()
return a,b
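# Illustrative sketch (not part of the original module): typical use of the
# traceroute() helper above.  The target address is a placeholder and the
# call needs enough privileges to send raw packets.
def _example_traceroute_usage():
    res, unans = traceroute("198.51.100.10", dport=443, maxttl=20, verbose=0)
    res.show()               # per-TTL table of responding hops
    return res.get_trace()   # {target: {ttl: (hop_ip, endpoint_reached)}}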
############################
## Multi-Traceroute Class ##
############################
class MTR:
#
# Initialize Multi-Traceroute Object Vars...
def __init__(self, nquery = 1, target = ''):
        self._nquery = nquery           # Number of traceroute queries
self._ntraces = 1 # Number of trace runs
self._iface = '' # Interface to use for trace
self._gw = '' # Default Gateway IPv4 Address for trace
self._netprotocol = 'TCP' # MTR network protocol to use for trace
self._target = target # Session targets
self._exptrg = [] # Expanded Session targets
self._host2ip = {} # Target Host Name to IP Address
self._ip2host = {} # Target IP Address to Host Name
self._tcnt = 0 # Total Trace count
self._tlblid = [] # Target Trace label IDs
self._res = [] # Trace Send/Receive Response Packets
        self._ures = []                 # Trace sent packets that received no response
self._ips = {} # Trace Unique IPv4 Addresses
self._hops = {} # Traceroute Hop Ranges
self._rt = [] # Individual Route Trace Summaries
self._ports = {} # Completed Targets & Ports
self._portsdone = {} # Completed Traceroutes & Ports
self._rtt = {} # Round Trip Times (msecs) for Trace Nodes
self._unknownlabel = incremental_label('"Unk%i"')
self._asres = conf.AS_resolver # Initial ASN Resolver
self._asns = {} # Found AS Numbers for the MTR session
self._asds = {} # Associated AS Number descriptions
self._unks = {} # Unknown Hops ASN IP boundaries
self._graphdef = None
self._graphasres = 0
self._graphpadding = 0
#
# Get the protocol name from protocol integer value.
#
# proto - Protocol integer value.
#
# Returns a string value representing the given integer protocol.
def get_proto_name(self, proto):
ps = str(proto)
if (ps == '6'):
pt = 'tcp'
elif (ps == '17'):
pt = 'udp'
elif (ps == '1'):
pt = 'icmp'
else:
pt = str(proto)
return pt
#
# Compute Black Holes...
def get_black_holes(self):
for t in range(0, self._ntraces):
for rtk in self._rt[t]:
trace = self._rt[t][rtk]
k = trace.keys()
for n in range(min(k), max(k)):
if not n in trace: # Fill in 'Unknown' hops
trace[n] = next(self._unknownlabel)
if not rtk in self._portsdone:
if rtk[2] == 1: #ICMP
bh = "%s %i/icmp" % (rtk[1],rtk[3])
elif rtk[2] == 6: #TCP
bh = "{ip:s} {dp:d}/tcp".format(ip = rtk[1], dp = rtk[3])
elif rtk[2] == 17: #UDP
bh = '%s %i/udp' % (rtk[1],rtk[3])
else:
bh = '%s %i/proto' % (rtk[1],rtk[2])
self._ips[rtk[1]] = None # Add the Blackhole IP to list of unique IP Addresses
#
# Update trace with Blackhole info...
bh = '"{bh:s}"'.format(bh = bh)
trace[max(k)+1] = bh
#
# Detection for Blackhole - Failed target not set as last Hop in trace...
for t in range(0, self._ntraces):
for rtk in self._rt[t]:
trace = self._rt[t][rtk]
k = trace.keys()
if ((' ' not in trace[max(k)]) and (':' not in trace[max(k)])):
if rtk[2] == 1: #ICMP
bh = "%s %i/icmp" % (rtk[1],rtk[3])
elif rtk[2] == 6: #TCP
bh = "{ip:s} {dp:d}/tcp".format(ip = rtk[1], dp = rtk[3])
elif rtk[2] == 17: #UDP
bh = '%s %i/udp' % (rtk[1],rtk[3])
else:
bh = '%s %i/proto' % (rtk[1],rtk[2])
self._ips[rtk[1]] = None # Add the Blackhole IP to list of unique IP Addresses
#
# Update trace with Blackhole info...
bh = '"{bh:s}"'.format(bh = bh)
trace[max(k)+1] = bh
#
# Compute the Hop range for each trace...
def compute_hop_ranges(self):
n = 1
for t in range(0, self._ntraces):
for rtk in self._rt[t]:
trace = self._rt[t][rtk]
k = trace.keys()
#
# Detect Blackhole Endpoints...
h = rtk[1]
mt = max(k)
if not ':' in trace[max(k)]:
h = trace[max(k)].replace('"','') # Add a Blackhole Endpoint (':' Char does not exist)
if (max(k) == 1):
#
# Special case: Max TTL set to 1...
mt = 1
else:
mt = max(k) - 1 # Blackhole - remove Hop for Blackhole -> Host never reached
hoplist = self._hops.get(h,[]) # Get previous hop value
hoplist.append([n, min(k), mt]) # Append trace hop range for this trace
self._hops[h] = hoplist # Update mtr Hop value
n += 1
#
# Get AS Numbers...
def get_asns(self, privaddr = 0):
"""Obtain associated AS Numbers for IPv4 Addreses.
privaddr: 0 - Normal display of AS numbers,
1 - Do not show an associated AS Number bound box (cluster) on graph for a private IPv4 Address."""
ips = {}
if privaddr:
for k,v in self._ips.items():
if (not is_private_addr(k)):
ips[k] = v
else:
ips = self._ips
#
# Special case for the loopback IP Address: 127.0.0.1 - Do not ASN resolve...
if '127.0.0.1' in ips:
del ips['127.0.0.1']
#
# ASN Lookup...
asnquerylist = dict.fromkeys(map(lambda x:x.rsplit(" ",1)[0], ips)).keys()
if self._asres is None:
asnlist = []
else:
try:
asnlist = self._asres.resolve(*asnquerylist)
except:
pass
for ip,asn,desc, in asnlist:
if asn is None:
continue
iplist = self._asns.get(asn,[]) # Get previous ASN value
iplist.append(ip) # Append IP Address to previous ASN
#
# If ASN is a string Convert to a number: (i.e., 'AS3257' => 3257)
if (type(asn) == str):
asn = asn.upper()
asn = asn.replace('AS','')
try:
asn = int(asn)
self._asns[asn] = iplist
self._asds[asn] = desc
except:
continue
else:
self._asns[asn] = iplist
self._asds[asn] = desc
#
# Get the ASN for a given IP Address.
#
# ip - IP Address to get the ASN for.
#
# Return the ASN for a given IP Address if found.
# A -1 is returned if not found.
def get_asn_ip(self, ip):
for a in self._asns:
for i in self._asns[a]:
if (ip == i):
return a
return -1
#
# Guess Traceroute 'Unknown (Unkn) Hops' ASNs.
#
# Technique: Method to guess ASNs for Traceroute 'Unknown Hops'.
    #            If the ASN assigned to the known Ancestor IP is the
    #            same as the one assigned to the known Descendant IP,
    #            then use this ASN for the 'Unknown Hop'.
    #            Special case guess: If the Descendant IP is an
    #            Endpoint Host Target, then assign the hop to its
    #            associated ASN.
def guess_unk_asns(self):
t = 1
for q in range(0, self._ntraces):
for rtk in self._rt[q]:
trace = self._rt[q][rtk]
tk = trace.keys()
begip = endip = ''
unklist = []
for n in range(min(tk), (max(tk) + 1)):
if (trace[n].find('Unk') == -1):
#
# IP Address Hop found...
if (len(unklist) == 0):
#
# No 'Unknown Hop' found yet...
begip = trace[n]
else:
#
# At least one Unknown Hop found - Store IP boundary...
endip = trace[n]
for u in unklist:
idx = begip.find(':')
if (idx != -1): # Remove Endpoint Trace port info: '"162.144.22.85":T443'
begip = begip[:idx]
idx = endip.find(':')
if (idx != -1):
endip = endip[:idx]
#
# u[0] - Unknown Hop name...
# u[1] - Hop number...
self._unks[u[0]] = [begip, endip, '{t:d}:{h:d}'.format(t = t, h = u[1])]
#
# Init var for new Unknown Hop search...
begip = endip = ''
unklist = []
else:
#
# 'Unknown Hop' found...
unklist.append([trace[n], n])
t += 1 # Inc next trace count
#
# Assign 'Unknown Hop' ASN...
for u in self._unks:
bip = self._unks[u][0]
bip = bip.replace('"','') # Begin IP - Strip off surrounding double quotes (")
basn = self.get_asn_ip(bip)
if (basn == -1):
continue;
eip = self._unks[u][1]
eip = eip.replace('"','')
easn = self.get_asn_ip(eip)
if (easn == -1):
continue;
#
# Append the 'Unknown Hop' to an ASN if
# Ancestor/Descendant IP ASN match...
if (basn == easn):
self._asns[basn].append(u.replace('"',''))
else:
#
# Special case guess: If the Descendant IP is
# a Endpoint Host Target the assign it to its
# associated ASN.
for d in self._tlblid:
if (eip in d):
self._asns[easn].append(u.replace('"',''))
break
#
# Make the DOT graph...
def make_dot_graph(self, ASres = None, padding = 0, vspread = 0.75, title = "Multi-Traceroute (MTR) Probe", timestamp = "", rtt = 1):
import datetime
if ASres is None:
self._asres = conf.AS_resolver
self._graphasres = ASres
self._graphpadding = padding
#
# ASN box color generator...
backcolorlist=colgen("60","86","ba","ff")
#
# Edge (trace arrows) color generator...
forecolorlist=colgen("a0","70","40","20")
#
# Begin the DOT Digraph...
s = "### Scapy3k Multi-Traceroute (MTR) DOT Graph Results ({t:s}) ###\n".format(t = datetime.datetime.now().isoformat(' '))
s += "\ndigraph mtr {\n"
#
# Define the default graph attributes...
s += '\tgraph [bgcolor=transparent,ranksep={vs:.2f}];\n'.format(vs = vspread)
#
# Define the default node shape and drawing color...
s += '\tnode [shape="ellipse",fontname="Sans-Serif",fontsize=11,color="black",gradientangle=270,fillcolor="white:#a0a0a0",style="filled"];\n'
#
# Combine Trace Probe Begin Points...
#
# k0 k1 k2 v0 v1 k0 k1 k2 v0 v1
# Ex: bp = {('192.168.43.48',5555,''): ['T1','T3'], ('192.168.43.48',443,'https'): ['T2','T4']}
bp = {} # ep -> A single services label for a given IP
for d in self._tlblid: # k v0 v1 v2 v3 v4 v5 v6 v7
for k,v in d.items(): # Ex: k: '162.144.22.87' v: ('T1', '192.168.43.48', '162.144.22.87', 6, 443, 'https', 'SA', '')
p = bp.get((v[1], v[4], v[5]))
if (p == None):
bp[(v[1], v[4], v[5])] = [v[0]] # Add new (TCP Flags / ICMP / Proto) and initial trace ID
else:
bp[(v[1], v[4], v[5])].append(v[0]) # Append additional trace IDs
#
# Combine Begin Point services...
# k sv0 sv1 sv0 sv1
# Ex bpip = {'192.168.43.48': [('<BT2>T2|<BT4>T4', 'https(443)'), ('<BB1>T1|<BT3>T3', '5555')]}
bpip = {} # epip -> Combined Endpoint services label for a given IP
for k,v in bp.items():
tr = ''
for t in range(0, len(v)):
if (tr == ''):
tr += '<B{ts:s}>{ts:s}'.format(ts = v[t])
else:
tr += '|<B{ts:s}>{ts:s}'.format(ts = v[t])
p = k[2]
if (p == ''): # Use port number not name if resolved
p = str(k[1])
else:
p += '(' + str(k[1]) + ')' # Use both name and port
if k[0] in bpip:
bpip[k[0]].append((tr, p))
else:
bpip[k[0]] = [(tr, p)]
#
# Create Endpoint Target Clusters...
epc = {} # Endpoint Target Cluster Dictionary
epip = [] # Endpoint IPs array
oip = [] # Only Endpoint IP array
epprb = [] # Endpoint Target and Probe the same IP array
for d in self._tlblid: # Spin thru Target IDs
for k,v in d.items(): # Get access to Target Endpoints
h = k
if (v[6] == 'BH'): # Add a Blackhole Endpoint Target
h = '{bh:s} {bhp:d}/{bht:s}'.format(bh = k, bhp = v[4], bht = v[3])
            elif (v[1] == v[2]):    # When the Target and the host running the mtr session are the same,
                epprb.append(k)     # append the IP to the 'target and probe the same' array
epip.append(h)
oip.append(k)
#
# Create unique arrays...
uepip = set(epip) # Get a unique set of Endpoint IPs
uepipo = set(oip) # Get a unique set of Only Endpoint IPs
uepprb = set(epprb) # Get a unique set of Only IPs: Endpoint Target and Probe the same
#
# Now create unique endpoint target clusters....
for ep in uepip:
#
# Get Host only string...
eph = ep
f = ep.find(' ')
if (f >= 0):
eph = ep[0:f]
#
# Build Traceroute Hop Range label...
if ep in self._hops: # Is Endpoint IP in the Hops dictionary
hr = self._hops[ep]
elif eph in self._hops: # Is Host only endpoint in the Hops dictionary
hr = self._hops[eph]
else:
continue # Not found in the Hops dictionary
l = len(hr)
if (l == 1):
hrs = "Hop Range ("
else:
hrs = "Hop Ranges ("
c = 0
for r in hr:
hrs += 'T{s1:d}: {s2:d} → {s3:d}'.format(s1 = r[0], s2 = r[1], s3 = r[2])
c += 1
if (c < l):
hrs += ', '
hrs += ')'
ecs = "\t\t### MTR Target Cluster ###\n"
uep = ep.replace('.', '_')
uep = uep.replace(' ', '_')
uep = uep.replace('/', '_')
gwl = ''
if (self._gw == eph):
gwl = ' (Default Gateway)'
ecs += '\t\tsubgraph cluster_{ep:s} {{\n'.format(ep = uep)
ecs += '\t\t\ttooltip="MTR Target: {trg:s}{gwl:s}";\n'.format(trg = self._ip2host[eph], gwl = gwl)
ecs += '\t\t\tcolor="green";\n'
ecs += '\t\t\tfontsize=11;\n'
ecs += '\t\t\tfontname="Sans-Serif";\n'
ecs += '\t\t\tgradientangle=270;\n'
ecs += '\t\t\tfillcolor="white:#a0a0a0";\n'
ecs += '\t\t\tstyle="filled,rounded";\n'
ecs += '\t\t\tpenwidth=2;\n'
ecs += '\t\t\tlabel=<<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0"><TR><TD ALIGN="center"><B>Target: {h:s}{gwl:s}</B></TD></TR><TR><TD><FONT POINT-SIZE="9">{hr:s}</FONT></TD></TR></TABLE>>;\n'.format(h = self._ip2host[eph], gwl = gwl, hr = hrs)
ecs += '\t\t\tlabelloc="b";\n'
pre = ''
if ep in uepprb: # Special Case: Separate Endpoint Target from Probe
pre = '_' # when they are the same -> Prepend an underscore char: '_'
ecs += '\t\t\t"{pre:s}{ep:s}";\n'.format(pre = pre, ep = ep)
ecs += "\t\t}\n"
#
# Store Endpoint Cluster...
epc[ep] = ecs
#
# Create ASN Clusters (i.e. DOT subgraph and nodes)
s += "\n\t### ASN Clusters ###\n"
cipall = [] # Array of IPs consumed by all ASN Cluster
cepipall = [] # Array of IP Endpoints (Targets) consumed by all ASN Cluster
for asn in self._asns:
cipcur = []
s += '\tsubgraph cluster_{asn:d} {{\n'.format(asn = asn)
s += '\t\ttooltip="AS: {asn:d} - [{asnd:s}]";\n'.format(asn = asn, asnd = self._asds[asn])
col = next(backcolorlist)
s += '\t\tcolor="#{s0:s}{s1:s}{s2:s}";\n'.format(s0 = col[0], s1 = col[1], s2 = col[2])
#
# Fill in ASN Cluster the associated generated color using an 11.7% alpha channel value (30/256)...
s += '\t\tfillcolor="#{s0:s}{s1:s}{s2:s}30";\n'.format(s0 = col[0], s1 = col[1], s2 = col[2])
s += '\t\tstyle="filled,rounded";\n'
s += '\t\tnode [color="#{s0:s}{s1:s}{s2:s}",gradientangle=270,fillcolor="white:#{s0:s}{s1:s}{s2:s}",style="filled"];\n'.format(s0 = col[0], s1 = col[1], s2 = col[2])
s += '\t\tfontsize=10;\n'
s += '\t\tfontname="Sans-Serif";\n'
s += '\t\tlabel=<<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0"><TR><TD ALIGN="center"><B><FONT POINT-SIZE="11">AS: {asn:d}</FONT></B></TD></TR><TR><TD>[{des:s}]</TD></TR></TABLE>>;\n'.format(asn = asn, des = self._asds[asn])
s += '\t\tlabelloc="t";\n'
s += '\t\tpenwidth=3;\n'
for ip in self._asns[asn]:
#
# Only add IP if not an Endpoint Target...
if not ip in uepipo:
#
# Spin thru all traces and only Add IP if not an ICMP Destination Unreachable node...
for tr in range(0, self._ntraces):
for rtk in self._rt[tr]:
trace = self._rt[tr][rtk]
k = trace.keys()
for n in range(min(k), (max(k) + 1)):
#
# Check for not already added...
if not ip in cipall:
#
# Add IP Hop - found in trace and not an ICMP Destination Unreachable node...
if ('"{ip:s}"'.format(ip = ip) == trace[n]):
s += '\t\t"{ip:s}" [tooltip="Hop Host: {ip:s}"];\n'.format(ip = ip)
cipall.append(ip)
#
# Special check for ICMP Destination Unreachable nodes...
if ip in self._ports:
for p in self._ports[ip]:
if (p.find('ICMP dest-unreach') >=0):
#
# Check for not already added...
uip = '{uip:s} 3/icmp'.format(uip = ip)
if not uip in cipall:
s += '\t\t"{uip:s}";\n'.format(uip = uip)
cipall.append(uip)
else:
cipcur.append(ip) # Current list of Endpoints consumed by this ASN Cluster
cepipall.append(ip) # Accumulated list of Endpoints consumed by all ASN Clusters
#
# Add Endpoint Cluster(s) if part of this ASN Cluster (Nested Clusters)...
if (len(cipcur) > 0):
for ip in cipcur:
for e in epc: # Loop thru each Endpoint Target Clusters
h = e
f = e.find(' ') # Strip off 'port/proto'
if (f >= 0):
h = e[0:f]
if (h == ip):
s += epc[e]
s += "\t}\n"
#
# Add any Endpoint Target Clusters not consumed by an ASN Cluster (Stand-alone Cluster)
# and not the same as the host running the mtr session...
for ip in epc:
h = ip
f = h.find(' ') # Strip off 'port/proto'
if (f >= 0):
h = ip[0:f]
if not h in cepipall:
for k,v in bpip.items(): # Check for target = host running the mtr session - Try to Add
if (k != h): # this Endpoint target to the Probe Target Cluster below.
s += epc[ip] # Finally add the Endpoint Cluster if Stand-alone and
# not running the mtr session.
#
# Probe Target Cluster...
s += "\n\t### Probe Target Cluster ###\n"
s += '\tsubgraph cluster_probe_Title {\n'
p = ''
for k,v in bpip.items():
p += ' {ip:s}'.format(ip = k)
s += '\t\ttooltip="Multi-Traceroute (MTR) Probe: {ip:s}";\n'.format(ip = p)
s += '\t\tcolor="darkorange";\n'
s += '\t\tgradientangle=270;\n'
s += '\t\tfillcolor="white:#a0a0a0";\n'
s += '\t\tstyle="filled,rounded";\n'
s += '\t\tpenwidth=3;\n'
s += '\t\tfontsize=11;\n'
s += '\t\tfontname="Sans-Serif";\n'
#
# Format Label including trace targets...
tstr = ''
for t in self._target:
tstr += '<TR><TD ALIGN="center"><FONT POINT-SIZE="9">Target: {t:s} ('.format(t = t)
#
# Append resolve IP Addresses...
l = len(self._host2ip[t])
c = 0
for ip in self._host2ip[t]:
tstr += '{ip:s} → '.format(ip = ip)
#
# Append all associated Target IDs...
ti = []
for d in self._tlblid: # Spin thru Target IDs
for k,v in d.items(): # Get access to Target ID (v[0])
if (k == ip):
ti.append(v[0])
lt = len(ti)
ct = 0
for i in ti:
tstr += '{i:s}'.format(i = i)
ct += 1
if (ct < lt):
tstr += ', '
c += 1
if (c < l):
tstr += ', '
tstr += ')</FONT></TD></TR>'
s += '\t\tlabel=<<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0"><TR><TD ALIGN="center"><B>{s0:s}</B></TD></TR>'.format(s0 = title)
if (timestamp != ""):
s += '<TR><TD ALIGN="center"><FONT POINT-SIZE="9">{s0:s}</FONT></TD></TR>'.format(s0 = timestamp)
s += '{s0:s}</TABLE>>;\n'.format(s0 = tstr)
s += '\t\tlabelloc="t";\n'
for k,v in bpip.items():
s += '\t\t"{ip:s}";\n'.format(ip = k)
#
# Add in any Endpoint target that is the same as the host running the mtr session...
for ip in epc:
h = ip
f = h.find(' ') # Strip off 'port/proto'
if (f >= 0):
h = ip[0:f]
for k,v in bpip.items(): # Check for target = host running the mtr session - Try to Add
if (k == h): # this Endpoint target to the Probe Target Cluster.
s += epc[ip]
s += "\t}\n"
#
# Default Gateway Cluster...
s += "\n\t### Default Gateway Cluster ###\n"
if (self._gw != ''):
if self._gw in self._ips:
if not self._gw in self._exptrg:
s += '\tsubgraph cluster_default_gateway {\n'
s += '\t\ttooltip="Default Gateway Host: {gw:s}";\n'.format(gw = self._gw)
s += '\t\tcolor="goldenrod";\n'
s += '\t\tgradientangle=270;\n'
s += '\t\tfillcolor="white:#b8860b30";\n'
s += '\t\tstyle="filled,rounded";\n'
s += '\t\tpenwidth=3;\n'
s += '\t\tfontsize=11;\n'
s += '\t\tfontname="Sans-Serif";\n'
s += '\t\tlabel=<<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0" ALIGN="center"><TR><TD><B><FONT POINT-SIZE="9">Default Gateway</FONT></B></TD></TR></TABLE>>;\n'
s += '\t\t"{gw:s}" [shape="diamond",fontname="Sans-Serif",fontsize=11,color="black",gradientangle=270,fillcolor="white:goldenrod",style="rounded,filled",tooltip="Default Gateway Host: {gw:s}"];\n'.format(gw = self._gw)
s += "\t}\n"
#
# Build Begin Point strings...
# Ex bps = '192.168.43.48" [shape="record",color="black",gradientangle=270,fillcolor="white:darkorange",style="filled",'
# + 'label="192.168.43.48\nProbe|{http|{<BT1>T1|<BT3>T3}}|{https:{<BT2>T4|<BT3>T4}}"];'
s += "\n\t### Probe Begin Traces ###\n"
for k,v in bpip.items():
tr = ''
for sv in v:
if (self._netprotocol == 'ICMP'):
if (sv[1].find('ICMP') >= 0):
ps = '{p:s} echo-request'.format(p = sv[1])
else:
ps = 'ICMP({p:s}) echo-request'.format(p = sv[1])
else:
ps = '{pr:s}: {p:s}'.format(pr = self._netprotocol, p = sv[1])
if (tr == ''):
tr += '{{{ps:s}|{{{t:s}}}}}'.format(ps = ps, t = sv[0])
else:
tr += '|{{{ps:s}|{{{t:s}}}}}'.format(ps = ps, t = sv[0])
bps1 = '\t"{ip:s}" [shape="record",color="black",gradientangle=270,fillcolor="white:darkorange",style="filled,rounded",'.format(ip = k)
if (self._iface != ''):
bps2 = 'label="Probe: {ip:s}\\nNetwork Interface: {ifc:s}|{tr:s}",tooltip="Begin Host Probe: {ip:s}"];\n'.format(ip = k, ifc = self._iface, tr = tr)
else:
bps2 = 'label="Probe: {ip:s}|{tr:s}",tooltip="Begin Host Probe: {ip:s}"];\n'.format(ip = k, tr = tr)
s += bps1 + bps2
#
s += "\n\t### Target Endpoints ###\n"
#
# Combine Trace Target Endpoints...
#
# k0 k1 k2 v0 v1 v2 k0 k1 k2 v0 v1 v2
# Ex: ep = {('162.144.22.87',80,'http'): ['SA','T1','T3'], ('10.14.22.8',443,'https'): ['SA','T2','T4']}
ep = {} # ep -> A single services label for a given IP
for d in self._tlblid: # k v0 v1 v2 v3 v4 v5 v6 v7
for k,v in d.items(): # Ex: k: 162.144.22.87 v: ('T1', '10.222.222.10', '162.144.22.87', 6, 443, 'https', 'SA', '')
if not (v[6] == 'BH'): # Blackhole detection - do not create Endpoint
p = ep.get((k, v[4], v[5]))
if (p == None):
ep[(k, v[4], v[5])] = [v[6], v[0]] # Add new (TCP Flags / ICMP type / Proto) and initial trace ID
else:
ep[(k, v[4], v[5])].append(v[0]) # Append additional trace IDs
#
# Combine Endpoint services...
# k v v
# k sv0 sv1 sv2 sv0 sv1 sv2
# Ex epip = {'206.111.13.58': [('<ET8>T8|<ET10>T10', 'https', 'SA'), ('<ET7>T7|<ET6>T6', 'http', 'SA')]}
epip = {} # epip -> Combined Endpoint services label for a given IP
for k,v in ep.items():
tr = ''
for t in range(1, len(v)):
if (tr == ''):
tr += '<E{ts:s}>{ts:s}'.format(ts = v[t])
else:
tr += '|<E{ts:s}>{ts:s}'.format(ts = v[t])
p = k[2]
if (p == ''): # Use port number not name if resolved
p = str(k[1])
else:
p += '(' + str(k[1]) + ')' # Use both name and port
if k[0] in epip:
epip[k[0]].append((tr, p, v[0]))
else:
epip[k[0]] = [(tr, p, v[0])]
#
# Build Endpoint strings...
# Ex eps = '162.144.22.87" [shape="record",color="black",gradientangle=270,fillcolor="lightgreen:green",style="filled,rounded",'
# + 'label="162.144.22.87\nTarget|{{<ET1>T1|<ET3>T3}|https SA}|{{<ET2>T4|<ET3>T4}|http SA}"];'
for k,v in epip.items():
tr = ''
for sv in v:
if (self._netprotocol == 'ICMP'):
ps = 'ICMP(0) echo-reply'
else:
ps = '{p:s} {f:s}'.format(p = sv[1], f = sv[2])
if (tr == ''):
tr += '{{{{{t:s}}}|{ps:s}}}'.format(t = sv[0], ps = ps)
else:
tr += '|{{{{{t:s}}}|{ps:s}}}'.format(t = sv[0], ps = ps)
pre = ''
if k in uepprb: # Special Case: Separate Endpoint Target from Probe
pre = '_' # when they are the same
eps1 = '\t"{pre:s}{ip:s}" [shape="record",color="black",gradientangle=270,fillcolor="lightgreen:green",style="filled,rounded",'.format(pre = pre, ip = k)
eps2 = 'label="Resolved Target\\n{ip:s}|{tr:s}",tooltip="MTR Resolved Target: {ip:s}"];\n'.format(ip = k, tr = tr)
s += eps1 + eps2
#
# Blackholes...
#
# ***Note: Order matters: If a hop is both a Blackhole on one trace and
#          an ICMP destination unreachable hop on another,
#          it will appear in the dot file as two nodes in
#          both sections. The ICMP destination unreachable
#          hop node will take precedence and appear only
#          once, since it is defined last.
s += "\n\t### Blackholes ###\n"
bhhops = []
for d in self._tlblid: # k v0 v1 v2 v3 v4 v5 v6 v7
for k,v in d.items(): # Ex: k: 162.144.22.87 v: ('T1', '10.222.222.10', '162.144.22.87', 'tcp', 5555, '', 'BH', 'I3')
if (v[6] == 'BH'): # Blackhole detection
#
# If the hop is both a target blackhole and an ICMP packet hop, then skip creating this
# node; it will be created in the 'ICMP Destination Unreachable Hops' section.
if (v[7] != 'I3'): # ICMP destination not reached detection
nd = '{b:s} {prt:d}/{pro:s}'.format(b = v[2], prt = v[4], pro = v[3])
if (self._netprotocol == 'ICMP'):
bhh = '{b:s}<BR/><FONT POINT-SIZE="9">ICMP(0) echo-reply</FONT>'.format(b = v[2])
else:
bhh = nd
#
# If not already added...
if not bhh in bhhops:
lb = 'label=<{lh:s}<BR/><FONT POINT-SIZE="8">Failed Target</FONT>>'.format(lh = bhh)
s += '\t"{bh:s}" [{l:s},shape="doubleoctagon",color="black",gradientangle=270,fillcolor="white:red",style="filled,rounded",tooltip="Failed MTR Resolved Target: {b:s}"];\n'.format(bh = nd, l = lb, b = v[2])
bhhops.append(bhh)
#
# ICMP Destination Unreachable Hops...
s += "\n\t### ICMP Destination Unreachable Hops ###\n"
for d in self._ports:
for p in self._ports[d]:
if d in self._exptrg:
#
# Create Node: Target same as node that returns an ICMP packet...
if (p.find('ICMP dest-unreach') >=0 ):
unreach = 'ICMP(3): Destination'
# 0 1 2 3 4 5
# Ex ICMP ports: '<I3> ICMP dest-unreach port-unreachable 17 53'
icmpparts = p.split(' ')
if (icmpparts[3] == 'network-unreachable'):
unreach += '/Network'
elif (icmpparts[3] == 'host-unreachable'):
unreach += '/Host'
elif (icmpparts[3] == 'protocol-unreachable'):
unreach += '/Protocol'
elif (icmpparts[3] == 'port-unreachable'):
unreach += '/Port'
protoname = self.get_proto_name(icmpparts[4])
protoport = '{pr:s}/{pt:s}'.format(pr = icmpparts[5], pt = protoname)
lb = 'label=<{lh:s} {pp:s}<BR/><FONT POINT-SIZE="8">{u:s} Unreachable</FONT><BR/><FONT POINT-SIZE="8">Failed Target</FONT>>'.format(lh = d, pp = protoport, u = unreach)
s += '\t"{lh:s} {pp:s}" [{lb:s},shape="doubleoctagon",color="black",gradientangle=270,fillcolor="yellow:red",style="filled,rounded",tooltip="{u:s} Unreachable, Failed Resolved Target: {lh:s} {pp:s}"];\n'.format(lb = lb, pp = protoport, lh = d, u = unreach)
else:
#
# Create Node: Target not same as node that returns an ICMP packet...
if (p.find('ICMP dest-unreach') >= 0):
unreach = 'ICMP(3): Destination'
if (p.find('network-unreachable') >= 0):
unreach += '/Network'
elif (p.find('host-unreachable') >= 0):
unreach += '/Host'
elif (p.find('protocol-unreachable') >= 0):
unreach += '/Protocol'
elif (p.find('port-unreachable') >= 0):
unreach += '/Port'
lb = 'label=<{lh:s} 3/icmp<BR/><FONT POINT-SIZE="8">{u:s} Unreachable</FONT>>'.format(lh = d, u = unreach)
s += '\t"{lh:s} 3/icmp" [{lb:s},shape="doubleoctagon",color="black",gradientangle=270,fillcolor="white:yellow",style="filled,rounded",tooltip="{u:s} Unreachable, Hop Host: {lh:s}"];\n'.format(lb = lb, lh = d, u = unreach)
#
# Padding check...
if self._graphpadding:
s += "\n\t### Nodes With Padding ###\n"
pad = {}
for t in range(0, self._ntraces):
for snd,rcv in self._res[t]:
if rcv.src not in self._ports and rcv.haslayer(conf.padding_layer):
p = rcv.getlayer(conf.padding_layer).load
if p != "\x00" * len(p):
pad[rcv.src] = None
for sr in pad:
lb = 'label=<<BR/>{r:s}<BR/><FONT POINT-SIZE="8">Padding</FONT>>'.format(r = sr)
s += '\t"{r:s}" [{l:s},shape="box3d",color="black",gradientangle=270,fillcolor="white:red",style="filled,rounded"];\n'.format(r = sr, l = lb)
#
# Draw each trace (i.e., DOT edge) for each number of queries...
s += "\n\t### Traces ###\n"
t = 0
for q in range(0, self._ntraces):
for rtk in self._rt[q]:
s += "\t### T{tr:d} -> {r:s} ###\n".format(tr = (t + 1), r = repr(rtk))
col = next(forecolorlist)
s += '\tedge [color="#{s0:s}{s1:s}{s2:s}"];\n'.format(s0 = col[0], s1 = col[1], s2 = col[2])
#
# Probe Begin Point (i.e., Beginning of a trace)...
for k,v in self._tlblid[t].items():
ptr = probe = v[1]
s += '\t"{bp:s}":B{tr:s}:s -> '.format(bp = ptr, tr = v[0])
#
# In between traces (i.e., Not at the beginning or end of a trace)...
trace = self._rt[q][rtk]
tk = trace.keys()
ntr = trace[min(tk)]
#
# Skip in between traces if there are none...
if (len(trace) > 1):
lb = 'Trace: {tr:d}:{tn:d}, {lbp:s} -> {lbn:s}'.format(tr = (t + 1), tn = min(tk), lbp = ptr, lbn = ntr.replace('"',''))
if not 'Unk' in ntr:
lb += ' (RTT: {prb:s} <-> {lbn:s} ({rtt:s}ms))'.format(prb = probe, lbn = ntr.replace('"',''), rtt = self._rtt[t + 1][min(tk)])
if rtt:
if not 'Unk' in ntr:
llb = 'Trace: {tr:d}:{tn:d}, RTT: {prb:s} <-> {lbn:s} ({rtt:s}ms)'.format(tr = (t + 1), tn = min(tk), prb = probe, lbn = ntr.replace('"',''), rtt = self._rtt[t + 1][min(tk)])
s += '{ntr:s} [label=<<FONT POINT-SIZE="8"> {rtt:s}ms</FONT>>,edgetooltip="{lb:s}",labeltooltip="{llb:s}"];\n'.format(ntr = ntr, rtt = self._rtt[t + 1][min(tk)], lb = lb, llb = llb)
else:
s += '{ntr:s} [edgetooltip="{lb:s}"];\n'.format(ntr = ntr, lb = lb)
else:
s += '{ntr:s} [edgetooltip="{lb:s}"];\n'.format(ntr = ntr, lb = lb)
for n in range(min(tk) + 1, max(tk)):
ptr = ntr
ntr = trace[n]
lb = 'Trace: {tr:d}:{tn:d}, {lbp:s} -> {lbn:s}'.format(tr = (t + 1), tn = n, lbp = ptr.replace('"',''), lbn = ntr.replace('"',''))
if not 'Unk' in ntr:
lb += ' (RTT: {prb:s} <-> {lbn:s} ({rtt:s}ms))'.format(prb = probe, lbn = ntr.replace('"',''), rtt = self._rtt[t + 1][n])
if rtt:
if not 'Unk' in ntr:
llb = 'Trace: {tr:d}:{tn:d}, RTT: {prb:s} <-> {lbn:s} ({rtt:s}ms)'.format(tr = (t + 1), tn = n, prb = probe, lbn = ntr.replace('"',''), rtt = self._rtt[t + 1][n])
#
# Special check to see if the next and previous nodes are the same.
# If yes, use the DOT 'xlabel' attribute to spread out labels so that they
# do not clash, and 'forcelabel' so that they are always placed.
if (ptr == ntr):
s += '\t{ptr:s} -> {ntr:s} [xlabel=<<FONT POINT-SIZE="8"> {rtt:s}ms</FONT>>,forcelabel=True,edgetooltip="{lb:s}",labeltooltip="{llb:s}"];\n'.format(ptr = ptr, ntr = ntr, rtt = self._rtt[t + 1][n], lb = lb, llb = llb)
else:
s += '\t{ptr:s} -> {ntr:s} [label=<<FONT POINT-SIZE="8"> {rtt:s}ms</FONT>>,edgetooltip="{lb:s}",labeltooltip="{llb:s}"];\n'.format(ptr = ptr, ntr = ntr, rtt = self._rtt[t + 1][n], lb = lb, llb = llb)
else:
s += '\t{ptr:s} -> {ntr:s} [edgetooltip="{lb:s}"];\n'.format(ptr = ptr, ntr = ntr, lb = lb)
else:
s += '\t{ptr:s} -> {ntr:s} [edgetooltip="{lb:s}"];\n'.format(ptr = ptr, ntr = ntr, lb = lb)
#
# Enhanced target Endpoint (i.e., end of a trace) replacement...
for k,v in self._tlblid[t].items():
if (v[6] == 'BH'): # Blackhole detection - do not create Enhanced Endpoint
#
# Check for Last Hop / Blackhole (Failed Target) match:
lh = trace[max(tk)]
lhicmp = False
if (lh.find(':I3') >= 0):     # Is last hop an ICMP packet from target?
lhicmp = True
f = lh.find(' ')              # Strip off ' port/proto' -> e.g., '"100.41.207.244 801/tcp"'
if (f >= 0):
lh = lh[0:f]
f = lh.find(':')              # Strip off the ':<flag>' suffix -> e.g., '"100.41.207.244":I3'
if (f >= 0):
lh = lh[0:f]
lh = lh.replace('"','') # Remove surrounding double quotes ("")
if (k == lh): # Does Hop match final Target?
#
# Blackhole last hop matched target:
#
# Check to skip in between traces...
if (len(trace) > 1):
s += '\t{ptr:s} -> '.format(ptr = ntr)
if lhicmp:
#
# Last hop is an ICMP packet from target and was reached...
lb = 'Trace: {tr:d}:{tn:d}, {lbp:s} -> {lbn:s}'.format(tr = (t + 1), tn = max(tk), lbp = ntr.replace('"',''), lbn = k)
lb += ' (RTT: {prb:s} <-> {lbn:s} ({rtt:s}ms))'.format(prb = v[1], lbn = lh, rtt = self._rtt[t + 1][max(tk)])
if rtt:
llb = 'Trace: {tr:d}:{tn:d}, RTT: {prb:s} <-> {lbn:s} ({rtt:s}ms)'.format(tr = (t + 1), tn = max(tk), prb = v[1], lbn = k, rtt = self._rtt[t + 1][max(tk)])
s += '"{bh:s} {bhp:d}/{bht:s}" [style="solid",label=<<FONT POINT-SIZE="8"> {rtt:s}ms</FONT>>,edgetooltip="{lb:s}",labeltooltip="{llb:s}"];\n'.format(bh = k, bhp = v[4], bht = v[3], rtt = self._rtt[t + 1][max(tk)], lb = lb, llb = llb)
else:
s += '"{bh:s} {bhp:d}/{bht:s}" [style="solid",edgetooltip="{lb:s}"];\n'.format(bh = k, bhp = v[4], bht = v[3], lb = lb)
else:
#
# Last hop is not an ICMP packet from target (Fake hop - never reached - use dashed trace)...
lb = 'Trace: {tr:d} - Failed MTR Resolved Target: {bh:s} {bhp:d}/{bht:s}'.format(tr = (t + 1), bh = k, bhp = v[4], bht = v[3])
s += '"{bh:s} {bhp:d}/{bht:s}" [style="dashed",label=<<FONT POINT-SIZE="8"> T{tr:d}</FONT>>,edgetooltip="{lb:s}",labeltooltip="{lb:s}"];\n'.format(bh = k, bhp = v[4], bht = v[3], tr = (t + 1), lb = lb)
else:
#
# Blackhole not matched (most likely an 'ICMP (3) destination-unreachable'
# reply, but the last hop is not equal to the target):
#
# Add this last Hop (This Hop is not the Target)...
#
# Check to skip in between traces...
if (len(trace) > 1):
s += '\t{ptr:s} -> '.format(ptr = ntr)
lb = 'Trace: {tr:d}:{tn:d}, {lbp:s} -> {lbn:s}'.format(tr = (t + 1), tn = max(tk), lbp = ntr.replace('"',''), lbn = lh)
lb += ' (RTT: {prb:s} <-> {lbn:s} ({rtt:s}ms))'.format(prb = v[1], lbn = lh, rtt = self._rtt[t + 1][max(tk)])
llb = 'Trace: {tr:d}:{tn:d}, RTT: {prb:s} <-> {lbn:s} ({rtt:s}ms)'.format(tr = (t + 1), tn = max(tk), prb = v[1], lbn = lh, rtt = self._rtt[t + 1][max(tk)])
if rtt:
s += '"{lh:s} 3/icmp" [style="solid",label=<<FONT POINT-SIZE="8"> {rtt:s}ms</FONT>>,edgetooltip="{lb:s}",labeltooltip="{llb:s}"];\n'.format(lh = lh, rtt = self._rtt[t + 1][max(tk)], lb = lb, llb = llb)
else:
s += '"{lh:s} 3/icmp" [style="solid",edgetooltip="{lb:s} 3/icmp",labeltooltip="{llb:s}"];\n'.format(lh = lh, lb = lb, llb = llb)
#
# Add the Failed Target (Blackhole - Fake hop - never reached - use dashed trace)...
s += '\t"{lh:s} 3/icmp" -> '.format(lh = lh)
lb = 'Trace: {tr:d} - Failed MTR Resolved Target: {bh:s} {bhp:d}/{bht:s}'.format(tr = (t + 1), bh = k, bhp = v[4], bht = v[3])
s += '"{bh:s} {bhp:d}/{bht:s}" [style="dashed",label=<<FONT POINT-SIZE="8"> T{tr:d}</FONT>>,edgetooltip="{lb:s}",labeltooltip="{llb:s}"];\n'.format(bh = k, bhp = v[4], bht = v[3], tr = (t + 1), lb = lb, llb = lb)
else: # Enhanced Target Endpoint
#
# Check to skip in between traces...
if (len(trace) > 1):
s += '\t{ptr:s} -> '.format(ptr = ntr)
lb = 'Trace: {tr:d}:{tn:d}, {lbp:s} -> {lbn:s}'.format(tr = (t + 1), tn = max(tk), lbp = ntr.replace('"',''), lbn = k)
if not 'Unk' in k:
lb += ' (RTT: {prb:s} <-> {lbn:s} ({rtt:s}ms))'.format(prb = v[1], lbn = k, rtt = self._rtt[t + 1][max(tk)])
pre = ''
if k in uepprb: # Special Case: Distinguish the Endpoint Target from Probe
pre = '_' # when they are the same using the underscore char: '_'.
if rtt:
if not 'Unk' in k:
llb = 'Trace: {tr:d}:{tn:d}, RTT: {prb:s} <-> {lbn:s} ({rtt:s}ms)'.format(tr = (t + 1), tn = max(tk), prb = v[1], lbn = k, rtt = self._rtt[t + 1][max(tk)])
#
# Check to remove label clashing...
ntrs = ntr.replace('"','') # Remove surrounding double quotes ("")
if (ntrs == k):
s += '"{pre:s}{ep:s}":E{tr:s}:n [style="solid",xlabel=<<FONT POINT-SIZE="8"> {rtt:s}ms</FONT>>,forcelabel=True,edgetooltip="{lb:s}",labeltooltip="{llb:s}"];\n'.format(pre = pre, ep = k, tr = v[0], rtt = self._rtt[t + 1][max(tk)], lb = lb, llb = llb)
else:
s += '"{pre:s}{ep:s}":E{tr:s}:n [style="solid",label=<<FONT POINT-SIZE="8"> {rtt:s}ms</FONT>>,edgetooltip="{lb:s}",labeltooltip="{llb:s}"];\n'.format(pre = pre, ep = k, tr = v[0], rtt = self._rtt[t + 1][max(tk)], lb = lb, llb = llb)
else:
s += '"{pre:s}{ep:s}":E{tr:s}:n [style="solid",edgetooltip="{lb:s}"];\n'.format(pre = pre, ep = k, tr = v[0], lb = lb)
else:
s += '"{pre:s}{ep:s}":E{tr:s}:n [style="solid",edgetooltip="{lb:s}"];\n'.format(pre = pre, ep = k, tr = v[0], lb = lb)
t += 1 # Next trace out of total traces
#
# Decorate Unknown ('Unkn') Nodes...
s += "\n\t### Decoration For Unknown (Unkn) Node Hops ###\n"
for u in self._unks:
s += '\t{u:s} [tooltip="Trace: {t:s}, Unknown Hop: {u2:s}",shape="egg",fontname="Sans-Serif",fontsize=9,height=0.2,width=0.2,color="black",gradientangle=270,fillcolor="white:#d8d8d8",style="filled"];\n'.format(u = u, t = self._unks[u][2], u2 = u.replace('"',''))
#
# Create tooltip for standalone nodes...
s += "\n\t### Tooltip for Standalone Node Hops ###\n"
for k,v in self._ips.items():
if not k in cipall:
if (k != self._gw):
if not k in cepipall:
if not k in self._ports:
found = False
for tid in self._tlblid:
if k in tid:
found = True
break
if not found:
s += '\t"{ip:s}" [tooltip="Hop Host: {ip:s}"];\n'.format(ip = k)
#
# End the DOT Digraph...
s += "}\n";
#
# Store the DOT Digraph results...
self._graphdef = s
#
# Graph the Multi-Traceroute...
def graph(self, ASres=None, padding=0, vspread=0.75, title="Multi-Traceroute Probe (MTR)", timestamp="", rtt=1, **kargs):
"""x.graph(ASres=conf.AS_resolver, other args):
ASres = None : Use AS default resolver => 'conf.AS_resolver'
ASres = AS_resolver() : default whois AS resolver (riswhois.ripe.net)
ASres = AS_resolver_cymru(): use whois.cymru.com whois database
ASres = AS_resolver(server="whois.ra.net")
padding: Show packets with padding as a red 3D-Box.
vspread: Vertical separation between nodes on graph.
title: Title text for the rendering graphic.
timestamp: Title Time Stamp text to appear below the Title text.
rtt: Display Round-Trip Times (msec) for Hops along trace edges.
format: Output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option.
figsize: w,h tuple in inches. See matplotlib documentation.
target: filename. If None, uses matplotlib to display.
prog: Which graphviz program to use."""
if self._asres is None:
self._asres = conf.AS_resolver
if (self._graphdef is None or # Remake the graph if there are any changes
self._graphasres != self._asres or
self._graphpadding != padding):
self.make_dot_graph(ASres, padding, vspread, title, timestamp, rtt)
return do_graph(self._graphdef, **kargs)
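#
# A minimal usage sketch for MTR.graph() (hypothetical output path; 'mtrc' being an MTR
# instance returned by mtr() further below; keyword arguments such as 'format', 'target'
# and 'prog' are passed straight through to do_graph(), as listed in the docstring above):
#   >>> mtrc.graph(padding=0, rtt=1, title="MTR Probe", format="svg", target="/tmp/mtr.svg")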
####################################
## Multi-Traceroute Results Class ##
####################################
class MTracerouteResult(SndRcvList):
def __init__(self, res=None, name="MTraceroute", stats=None):
PacketList.__init__(self, res, name, stats, vector_index = 1)
def show(self, ntrace):
return self.make_table(lambda s,r:
(s.sprintf("Trace: " + str(ntrace) + " - %IP.dst%:{TCP:tcp%ir,TCP.dport%}{UDP:udp%ir,UDP.dport%}{ICMP:ICMP}"),
s.ttl,
r.sprintf("%-15s,IP.src% {TCP:%TCP.flags%}{ICMP:%ir,ICMP.type%}")))
#
# Get trace components...
#
# mtrc - Instance of a MTRC class
#
# nq - Traceroute query number
def get_trace_components(self, mtrc, nq):
ips = {}
rt = {}
rtt = {}
trtt = {}
ports = {}
portsdone = {}
trgttl = {}
if (len(self.res) > 0):
#
# Responses found...
for s,r in self.res:
s = s.getlayer(IP) or (conf.ipv6_enabled and s[scapy.layers.inet6.IPv6]) or s
r = r.getlayer(IP) or (conf.ipv6_enabled and r[scapy.layers.inet6.IPv6]) or r
#
# Make sure 'r.src' is an IP Address (e.g., Case where r.src = '24.97.150.188 80/tcp')
rs = r.src.split()
ips[rs[0]] = None
if TCP in s:
trace_id = (s.src, s.dst, 6, s.dport)
elif UDP in s:
trace_id = (s.src, s.dst, 17, s.dport)
elif ICMP in s:
trace_id = (s.src, s.dst, 1, s.type)
else:
trace_id = (s.src, s.dst, s.proto, 0)
trace = rt.get(trace_id, {})
ttl = conf.ipv6_enabled and scapy.layers.inet6.IPv6 in s and s.hlim or s.ttl
#
# Check for packet response types:
if not (ICMP in r and r[ICMP].type == 11) and not (conf.ipv6_enabled and scapy.layers.inet6.IPv6 in r and scapy.layers.inet6.ICMPv6TimeExceeded in r):
#
# Mostly: Process target reached or ICMP Unreachable...
if trace_id in portsdone:
#
# Special check for out of order response packets: If the previous trace was determined
# done, but a ttl arrives with a lower value, then process this response packet as the
# final ttl target packet.
if (ttl >= trgttl[trace_id]):
continue # Next Send/Receive packet
else:
#
# Out of order response packet - process this packet as the possible
# final ttl target packet.
try:
if trgttl[trace_id] in trace:
del trace[trgttl[trace_id]] # Remove previous ttl target
except:
pass
portsdone[trace_id] = None
trgttl[trace_id] = ttl # Save potential target ttl packet
p = ports.get(r.src,[])
if TCP in r:
p.append(r.sprintf("<T%ir,TCP.sport%> %TCP.sport% %TCP.flags%"))
trace[ttl] = r.sprintf('"%r,src%":T%ir,TCP.sport%')
elif UDP in r:
p.append(r.sprintf("<U%ir,UDP.sport%> %UDP.sport%"))
trace[ttl] = r.sprintf('"%r,src%":U%ir,UDP.sport%')
elif ICMP in r:
if (r[ICMP].type == 0):
#
# Process echo-reply...
p.append(r.sprintf("<I%ir,ICMP.type%> ICMP %ICMP.type%"))
trace[ttl] = r.sprintf('"%r,src%":I%ir,ICMP.type%')
else:
#
# Format Ex: '<I3> ICMP dest-unreach port-unreachable 17 53'
p.append(r.sprintf("<I%ir,ICMP.type%> ICMP %ICMP.type% %ICMP.code% %ICMP.proto% %r,ICMP.dport%"))
trace[ttl] = r.sprintf('"%r,src%":I%ir,ICMP.type%')
else:
p.append(r.sprintf("{IP:<P%ir,proto%> IP %proto%}{IPv6:<P%ir,nh%> IPv6 %nh%}"))
trace[ttl] = r.sprintf('"%r,src%":{IP:P%ir,proto%}{IPv6:P%ir,nh%}')
ports[r.src] = p
else:
#
# Mostly ICMP Time-Exceeded packet - Save Hop Host IP Address...
trace[ttl] = r.sprintf('"%r,src%"')
rt[trace_id] = trace
#
# Compute the Round Trip Time for this trace packet in (msec)...
rtrace = rtt.get(trace_id, {})
crtt = (r.time - s.sent_time) * 1000
rtrace[ttl] = "{crtt:.3f}".format(crtt = crtt)
rtt[trace_id] = rtrace
else:
#
# No Responses found - Most likely target same as host running the mtr session...
#
# Create a 'fake' failed target (Blackhole) trace using the destination host
# found in unanswered packets...
for p in mtrc._ures[nq]:
ips[p.dst] = None
trace_id = (p.src, p.dst, p.proto, p.dport)
portsdone[trace_id] = None
if trace_id not in rt:
pt = mtrc.get_proto_name(p.proto)
#
# Set the hop number to one (1) (i.e., ttl = 1) for this special case:
# target = mtr session host - 'fake' failed target...
rt[trace_id] = {1: '"{ip:s} {pr:d}/{pt:s}"'.format(ip = p.dst, pr = p.dport, pt = pt)}
#
# Store each trace component...
mtrc._ips.update(ips) # Add unique IP Addresses
mtrc._rt.append(rt) # Append a new Traceroute
mtrc._ports.update(ports) # Append completed Traceroute target and port info
mtrc._portsdone.update(portsdone) # Append completed Traceroute with associated target and port
#
# Create Round Trip Times Trace lookup dictionary...
tcnt = mtrc._tcnt
for rttk in rtt:
tcnt += 1
trtt[tcnt] = rtt[rttk]
mtrc._rtt.update(trtt) # Update Round Trip Times for Trace Nodes
#
# Update the Target Trace Label IDs and Blackhole (Failed Target) detection...
#
# rtk0 rtk1 rtk2 rtk3
# Ex: {('10.222.222.10', '10.222.222.1', 6, 9980): {1: '"10.222.222.10":T9980'}}
for rtk in rt:
mtrc._tcnt += 1 # Compute the total trace count
#
# Derive flags from ports:
# Ex: {'63.117.14.247': ['<T80> http SA', '<T443> https SA']}
prtflgs = ports.get(rtk[1],[])
found = False
for pf in prtflgs:
if (mtrc._netprotocol == 'ICMP'):
pat = '<I0>' # ICMP: Create reg exp pattern
else:
pat = '<[TU]{p:d}>'.format(p = rtk[3]) # TCP/UDP: Create reg exp pattern
match = re.search(pat, pf) # Search for port match
if match:
found = True
s = pf.split(' ')
if (len(s) == 3):
pn = s[1] # Service Port name / ICMP
fl = s[2] # TCP Flags / ICMP Type / Proto
elif (len(s) == 2):
pn = s[1] # Service Port name
fl = ''
else:
pn = ''
fl = ''
break
ic = '' # ICMP Destination not reachable flag
if not found: # Set Blackhole found - (fl -> 'BH')
#
# Set flag for last hop is a target and ICMP destination not reached flag set...
trace = rt[rtk]
tk = trace.keys()
lh = trace[max(tk)]
f = lh.find(':I3') # Is hop an ICMP destination not reached node?
if (f >= 0):
lh = lh[0:f]                  # Strip off the ':I3' suffix -> e.g., '"100.41.207.244":I3'
lh = lh.replace('"','') # Remove surrounding double quotes ("")
if lh in mtrc._exptrg: # Is last hop a target?
ic = 'I3'
pn = ''
fl = 'BH'
#
# Update the Target Trace Label ID:
# Ex: {'63.117.14.247': ('T2', '10.222.222.10', '162.144.22.87', 6, 443, 'https', 'SA', '')}
pt = mtrc.get_proto_name(rtk[2])
tlid = {rtk[1]: ('T' + str(mtrc._tcnt), rtk[0], rtk[1], pt, rtk[3], pn, fl, ic)}
mtrc._tlblid.append(tlid)
######################
## Multi-Traceroute ##
######################
@conf.commands.register
def mtr(target, dport=80, minttl=1, maxttl=30, stype="Random", srcport=50000, iface=None, l4=None, filter=None, timeout=2, verbose=None, gw=None, netproto="TCP", nquery=1, ptype=None, payload=b'', privaddr=0, rasn=1, **kargs):
"""A Multi-Traceroute (mtr) command:
mtr(target, [dport=80,] [minttl=1,] [maxttl=30,] [stype="Random",] [srcport=50000,] [iface=None,]
    [l4=None,] [filter=None,] [nquery=1,] [privaddr=0,] [rasn=1,] [verbose=conf.verb])
stype: Source Port Type: "Random" or "Increment".
srcport: Source Port. Default: 50000.
gw: IPv4 Address of the Default Gateway.
netproto: Network Protocol (One of: "TCP", "UDP" or "ICMP").
nquery: Number of Traceroute queries to perform.
ptype: Payload Type: "Disable", "RandStr", "RandStrTerm" or "Custom".
payload: A byte object for each packet payload (e.g., b'\x01A\x0f\xff\x00') for ptype: 'Custom'.
privaddr: 0 - Default: Normal display of all resolved AS numbers.
1 - Do not show an associated AS Number bound box (cluster) on graph for a private IPv4 Address.
rasn: 0 - Do not resolve AS Numbers - No graph clustering.
1 - Default: Resolve all AS numbers."""
#
# Initialize vars...
trace = [] # Individual trace array
#
# Range check number of query traces
if (nquery < 1):
nquery = 1
#
# Create instance of an MTR class...
mtrc = MTR(nquery = nquery, target = target)
#
# Default to network protocol: "TCP" if not found in list...
plist = ["TCP", "UDP", "ICMP"]
netproto = netproto.upper()
if not netproto in plist:
netproto = "TCP"
mtrc._netprotocol = netproto
#
# Default to source type: "Random" if not found in list...
slist = ["Random", "Increment"]
stype = stype.title()
if not stype in slist:
stype = "Random"
if (stype == "Random"):
sport = RandShort() # Random
elif (stype == "Increment"):
if (srcport != None):
sport = IncrementalValue(start = (srcport - 1), step = 1, restart = 65535) # Increment
#
# Default the payload type to its network protocol's default value if not found in the list...
pllist = ["Disabled", "RandStr", "RandStrTerm", "Custom"]
if ptype is None or (not ptype in pllist):
if (netproto == "ICMP"):
ptype = "RandStr" # ICMP: A random string payload to fill out the minimum packet size
elif (netproto == "UDP"):
ptype = "RandStrTerm" # UDP: A random string terminated payload to fill out the minimum packet size
elif (netproto == "TCP"):
ptype = "Disabled" # TCP: Disabled -> The minimum packet size satisfied - no payload required
#
# Set trace interface...
if not iface is None:
mtrc._iface = iface
else:
mtrc._iface = conf.iface
#
# Set Default Gateway...
if not gw is None:
mtrc._gw = gw
#
# Set default verbosity if no override...
if verbose is None:
verbose = conf.verb
#
# Only consider ICMP error packets and TCP packets with at
# least the ACK flag set *and* either the SYN or the RST flag set...
filterundefined = False
if filter is None:
filterundefined = True
filter = "(icmp and (icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12)) or (tcp and (tcp[13] & 0x16 > 0x10))"
#
# Resolve and expand each target...
ntraces = 0 # Total trace count
exptrg = [] # Expanded targets
for t in target:
#
# Use scapy's 'Net' function to expand target...
et = [ip for ip in iter(Net(t))]
exptrg.extend(et)
#
# Map Host Names to IP Addresses and store...
if t in mtrc._host2ip:
mtrc._host2ip[t].extend(et)
else:
mtrc._host2ip[t] = et
#
# Map IP Addresses to Host Names and store...
for a in et:
mtrc._ip2host[a] = t
#
# Store resolved and expanded targets...
mtrc._exptrg = exptrg
#
# Traceroute each expanded target value...
if l4 is None:
#
# Standard Layer: 3 ('TCP', 'UDP' or 'ICMP') tracing...
for n in range(0, nquery):
for t in exptrg:
#
# Execute a traceroute based on network protocol setting...
if (netproto == "ICMP"):
#
# MTR Network Protocol: 'ICMP'
tid = 8 # Use a 'Type: 8 - Echo Request' packet for the trace:
id = 0x8888 # MTR ICMP identifier: '0x8888'
seq = IncrementalValue(start=(minttl - 2), step=1, restart=-10) # Use a Sequence number in step with TTL value
if filterundefined:
#
# Update Filter -> Allow for ICMP echo-request (8) and ICMP echo-reply (0) packet to be processed...
filter = "(icmp and (icmp[0]=8 or icmp[0]=0 or icmp[0]=3 or icmp[0]=4 or icmp[0]=5 or icmp[0]=11 or icmp[0]=12))"
#
# Check payload types:
if (ptype == 'Disabled'):
a,b = sr(IP(dst=[t], id=RandShort(), ttl=(minttl, maxttl))/ICMP(type=tid, id=id, seq=seq),
timeout=timeout, filter=filter, verbose=verbose, **kargs)
else:
if (ptype == 'RandStr'):
#
# Use a random payload string to fill out a minimum size PDU of 46 bytes for each ICMP packet:
# Length of 'IP()/ICMP()' = 28, Minimum Protocol Data Unit (PDU) is = 46 -> Therefore a
# payload of 18 octets is required.
pload = RandString(size = 18)
elif (ptype == 'RandStrTerm'):
pload = RandStringTerm(size = 17, term = b'\n') # Random string terminated
elif (ptype == 'Custom'):
pload = payload
#
# ICMP trace with payload...
a,b = sr(IP(dst=[t], id=RandShort(), ttl=(minttl, maxttl))/ICMP(type=tid, id=id, seq=seq)/Raw(load=pload),
timeout=timeout, filter=filter, verbose=verbose, **kargs)
elif (netproto == "UDP"):
#
# MTR Network Protocol: 'UDP'
if filterundefined:
filter += " or udp" # Update Filter -> Allow for processing UDP packets
#
# Check payload types:
if (ptype == 'Disabled'):
a,b = sr(IP(dst=[t], id=RandShort(), ttl=(minttl, maxttl))/UDP(sport=sport, dport=dport),
timeout=timeout, filter=filter, verbose=verbose, **kargs)
else:
if (ptype == 'RandStr'):
#
# Use a random payload string to fill out a minimum size PDU of 46 bytes for each UDP packet:
# Length of 'IP()/UDP()' = 28, Minimum PDU is = 46 -> Therefore a payload of 18 octets is required.
pload = RandString(size = 18)
elif (ptype == 'RandStrTerm'):
pload = RandStringTerm(size = 17, term = b'\n') # Random string terminated
elif (ptype == 'Custom'):
pload = payload
#
# UDP trace with payload...
a,b = sr(IP(dst=[t], id=RandShort(), ttl=(minttl, maxttl))/UDP(sport=sport, dport=dport)/Raw(load=pload),
timeout=timeout, filter=filter, verbose=verbose, **kargs)
else:
#
# Default MTR Network Protocol: 'TCP'
#
# Use some TCP options for the trace. Some firewalls will filter
# TCP/IP packets without the 'Timestamp' option set.
#
# Note: The minimum PDU size of 46 is satisfied with the use of TCP options.
#
# Use an integer encoded microsecond timestamp for the TCP option timestamp for each trace sequence.
uts = IntAutoMicroTime()
opts = [('MSS', 1460), ('NOP', None), ('NOP', None), ('Timestamp', (uts, 0)), ('NOP', None), ('WScale', 7)]
seq = RandInt() # Use a random TCP sequence number
#
# Check payload types:
if (ptype == 'Disabled'):
a,b = sr(IP(dst=[t], id=RandShort(), ttl=(minttl, maxttl))/TCP(seq=seq, sport=sport, dport=dport, options=opts),
timeout=timeout, filter=filter, verbose=verbose, **kargs)
else:
if (ptype == 'RandStr'):
pload = RandString(size = 32) # Use a 32 byte random string
elif (ptype == 'RandStrTerm'):
pload = RandStringTerm(size = 32, term = b'\n') # Use a 32 byte random string terminated
elif (ptype == 'Custom'):
pload = payload
#
# TCP trace with payload...
a,b = sr(IP(dst=[t], id=RandShort(),
ttl=(minttl, maxttl))/TCP(seq=seq, sport=sport, dport=dport, options=opts)/Raw(load=pload),
timeout=timeout, filter=filter, verbose=verbose, **kargs)
#
# Create an 'MTracerouteResult' instance for each set of result packets...
trace.append(MTracerouteResult(res = a.res))
mtrc._res.append(a) # Store Response packets
mtrc._ures.append(b)    # Store Unanswered packets
if verbose:
trace[ntraces].show(ntrace = (ntraces + 1))
print()
ntraces += 1
else:
#
# Custom Layer: 4 tracing...
filter="ip"
for n in range(0, nquery):
for t in exptrg:
#
# Run traceroute...
a,b = sr(IP(dst=[t], id=RandShort(), ttl=(minttl,maxttl))/l4,
timeout=timeout, filter=filter, verbose=verbose, **kargs)
trace.append(MTracerouteResult(res = a.res))
mtrc._res.append(a)
mtrc._ures.append(b)
if verbose:
trace[ntraces].show(ntrace = (ntraces + 1))
print()
ntraces += 1
#
# Store total trace run count...
mtrc._ntraces = ntraces
#
# Get the trace components...
# for n in range(0, ntraces):
for n in range(0, mtrc._ntraces):
trace[n].get_trace_components(mtrc, n)
#
# Compute any Black Holes...
mtrc.get_black_holes()
#
# Compute Trace Hop Ranges...
mtrc.compute_hop_ranges()
#
# Resolve AS Numbers...
if rasn:
mtrc.get_asns(privaddr)
#
# Try to guess ASNs for Traceroute 'Unknown Hops'...
mtrc.guess_unk_asns()
#
# Debug: Print object vars at verbose level 8...
if (verbose == 8):
print("mtrc._target (User Target(s)):")
print("=======================================================")
print(mtrc._target)
print("\nmtrc._exptrg (Resolved and Expanded Target(s)):")
print("=======================================================")
print(mtrc._exptrg)
print("\nmtrc._host2ip (Target Host Name to IP Address):")
print("=======================================================")
print(mtrc._host2ip)
print("\nmtrc._ip2host (Target IP Address to Host Name):")
print("=======================================================")
print(mtrc._ip2host)
print("\nmtrc._res (Trace Response Packets):")
print("=======================================================")
print(mtrc._res)
print("\nmtrc._ures (Trace Unresponse Packets):")
print("=======================================================")
print(mtrc._ures)
print("\nmtrc._ips (Trace Unique IPv4 Addresses):")
print("=======================================================")
print(mtrc._ips)
print("\nmtrc._rt (Individual Route Traces):")
print("=======================================================")
print(mtrc._rt)
print("\nmtrc._rtt (Round Trip Times (msecs) for Trace Nodes):")
print("=======================================================")
print(mtrc._rtt)
print("\nmtrc._hops (Traceroute Hop Ranges):")
print("=======================================================")
print(mtrc._hops)
print("\nmtrc._tlblid (Target Trace Label IDs):")
print("=======================================================")
print(mtrc._tlblid)
print("\nmtrc._ports (Completed Targets & Ports):")
print("=======================================================")
print(mtrc._ports)
print("\nmtrc._portsdone (Completed Trace Routes & Ports):")
print("=======================================================")
print(mtrc._portsdone)
print("\nconf.L3socket (Layer 3 Socket Method):")
print("=======================================================")
print(conf.L3socket)
print("\nconf.AS_resolver Resolver (AS Resolver Method):")
print("=======================================================")
print(conf.AS_resolver)
print("\nmtrc._asns (AS Numbers):")
print("=======================================================")
print(mtrc._asns)
print("\nmtrc._asds (AS Descriptions):")
print("=======================================================")
print(mtrc._asds)
print("\nmtrc._unks (Unknown Hops IP Boundary for AS Numbers):")
print("=======================================================")
print(mtrc._unks)
print("\nmtrc._iface (Trace Interface):")
print("=======================================================")
print(mtrc._iface)
print("\nmtrc._gw (Trace Default Gateway IPv4 Address):")
print("=======================================================")
print(mtrc._gw)
return mtrc
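#
# A minimal end-to-end usage sketch for mtr() (hypothetical targets and output file):
#   >>> m = mtr(["192.0.2.10", "www.example.com"], dport=443, netproto="TCP", nquery=2)
#   >>> m.graph(format="svg", target="/tmp/mtr_graph.svg")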
#############################
## Simple TCP client stack ##
#############################
class TCP_client(Automaton):
def parse_args(self, ip, port, *args, **kargs):
self.dst = next(iter(Net(ip)))
self.dport = port
self.sport = random.randrange(0,2**16)
self.l4 = IP(dst=ip)/TCP(sport=self.sport, dport=self.dport, flags=0,
seq=random.randrange(0,2**32))
self.src = self.l4.src
self.swin=self.l4[TCP].window
self.dwin=1
self.rcvbuf=""
bpf = "host %s and host %s and port %i and port %i" % (self.src,
self.dst,
self.sport,
self.dport)
# bpf=None
Automaton.parse_args(self, filter=bpf, **kargs)
def master_filter(self, pkt):
return (IP in pkt and
pkt[IP].src == self.dst and
pkt[IP].dst == self.src and
TCP in pkt and
pkt[TCP].sport == self.dport and
pkt[TCP].dport == self.sport and
self.l4[TCP].seq >= pkt[TCP].ack and # XXX: seq/ack 2^32 wrap up
((self.l4[TCP].ack == 0) or (self.l4[TCP].ack <= pkt[TCP].seq <= self.l4[TCP].ack+self.swin)) )
@ATMT.state(initial=1)
def START(self):
pass
@ATMT.state()
def SYN_SENT(self):
pass
@ATMT.state()
def ESTABLISHED(self):
pass
@ATMT.state()
def LAST_ACK(self):
pass
@ATMT.state(final=1)
def CLOSED(self):
pass
@ATMT.condition(START)
def connect(self):
raise self.SYN_SENT()
@ATMT.action(connect)
def send_syn(self):
self.l4[TCP].flags = "S"
self.send(self.l4)
self.l4[TCP].seq += 1
@ATMT.receive_condition(SYN_SENT)
def synack_received(self, pkt):
if pkt[TCP].flags & 0x3f == 0x12:
raise self.ESTABLISHED().action_parameters(pkt)
@ATMT.action(synack_received)
def send_ack_of_synack(self, pkt):
self.l4[TCP].ack = pkt[TCP].seq+1
self.l4[TCP].flags = "A"
self.send(self.l4)
@ATMT.receive_condition(ESTABLISHED)
def incoming_data_received(self, pkt):
if not isinstance(pkt[TCP].payload, NoPayload) and not isinstance(pkt[TCP].payload, conf.padding_layer):
raise self.ESTABLISHED().action_parameters(pkt)
@ATMT.action(incoming_data_received)
def receive_data(self,pkt):
data = (bytes(pkt[TCP].payload))
if data and self.l4[TCP].ack == pkt[TCP].seq:
self.l4[TCP].ack += len(data)
self.l4[TCP].flags = "A"
self.send(self.l4)
self.rcvbuf += data
if pkt[TCP].flags & 8 != 0: #PUSH
self.oi.tcp.send(self.rcvbuf)
self.rcvbuf = ""
@ATMT.ioevent(ESTABLISHED,name="tcp", as_supersocket="tcplink")
def outgoing_data_received(self, fd):
raise self.ESTABLISHED().action_parameters(fd.recv())
@ATMT.action(outgoing_data_received)
def send_data(self, d):
self.l4[TCP].flags = "PA"
self.send(self.l4/d)
self.l4[TCP].seq += len(d)
@ATMT.receive_condition(ESTABLISHED)
def reset_received(self, pkt):
if pkt[TCP].flags & 4 != 0:
raise self.CLOSED()
@ATMT.receive_condition(ESTABLISHED)
def fin_received(self, pkt):
if pkt[TCP].flags & 0x1 == 1:
raise self.LAST_ACK().action_parameters(pkt)
@ATMT.action(fin_received)
def send_finack(self, pkt):
self.l4[TCP].flags = "FA"
self.l4[TCP].ack = pkt[TCP].seq+1
self.send(self.l4)
self.l4[TCP].seq += 1
@ATMT.receive_condition(LAST_ACK)
def ack_of_fin_received(self, pkt):
if pkt[TCP].flags & 0x3f == 0x10:
raise self.CLOSED()
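#
# A minimal usage sketch for TCP_client via the 'tcplink' supersocket registered by the
# ESTABLISHED ioevent above (hypothetical host; no error handling):
#   >>> s = TCP_client.tcplink(Raw, "192.0.2.10", 80)
#   >>> s.send(b"GET / HTTP/1.0\r\n\r\n")
#   >>> s.recv()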
#####################
## Reporting stuff ##
#####################
def report_ports(target, ports):
"""portscan a target and output a LaTeX table
report_ports(target, ports) -> string"""
ans,unans = sr(IP(dst=target)/TCP(dport=ports),timeout=5)
rep = "\\begin{tabular}{|r|l|l|}\n\\hline\n"
for s,r in ans:
if not r.haslayer(ICMP):
if r.payload.flags == 0x12:
rep += r.sprintf("%TCP.sport% & open & SA \\\\\n")
rep += "\\hline\n"
for s,r in ans:
if r.haslayer(ICMP):
rep += r.sprintf("%TCPerror.dport% & closed & ICMP type %ICMP.type%/%ICMP.code% from %IP.src% \\\\\n")
elif r.payload.flags != 0x12:
rep += r.sprintf("%TCP.sport% & closed & TCP %TCP.flags% \\\\\n")
rep += "\\hline\n"
for i in unans:
rep += i.sprintf("%TCP.dport% & ? & unanswered \\\\\n")
rep += "\\hline\n\\end{tabular}\n"
return rep
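#
# A minimal usage sketch for report_ports() (hypothetical target and ports):
#   >>> print(report_ports("192.0.2.10", (22, 80, 443)))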
def IPID_count(lst, funcID=lambda x:x[1].id, funcpres=lambda x:x[1].summary()):
idlst = sorted(map(funcID, lst))
#classes = [idlst[0]]+map(lambda x:x[1],filter(lambda (x,y): abs(x-y)>50, map(lambda x,y: (x,y),idlst[:-1], idlst[1:])))
classes = [idlst[0]]+list(map(lambda x:x[1],filter(lambda a: abs(a[0]-a[1])>50, map(lambda x,y: (x,y),idlst[:-1], idlst[1:]))))
lst = sorted(map(lambda x:(funcID(x), funcpres(x)), lst))
print("Probably %i classes:" % len(classes), classes)
for id,pr in lst:
print("%5i" % id, pr)
def fragleak(target,sport=123, dport=123, timeout=0.2, onlyasc=0):
load = "XXXXYYYYYYYYYY"
# getmacbyip(target)
# pkt = IP(dst=target, id=RandShort(), options="\x22"*40)/UDP()/load
pkt = IP(dst=target, id=RandShort(), options="\x00"*40, flags=1)/UDP(sport=sport, dport=sport)/load
s=conf.L3socket()
intr=0
found={}
try:
while 1:
try:
if not intr:
s.send(pkt)
sin,sout,serr = select([s],[],[],timeout)
if not sin:
continue
ans=s.recv(1600)
if not isinstance(ans, IP): #TODO: IPv6
continue
if not isinstance(ans.payload, ICMP):
continue
if not isinstance(ans.payload.payload, IPerror):
continue
if ans.payload.payload.dst != target:
continue
if ans.src != target:
print("leak from", ans.src,end=" ")
# print repr(ans)
if not ans.haslayer(conf.padding_layer):
continue
# print repr(ans.payload.payload.payload.payload)
# if not isinstance(ans.payload.payload.payload.payload, conf.raw_layer):
# continue
# leak = ans.payload.payload.payload.payload.load[len(load):]
leak = ans.getlayer(conf.padding_layer).load
if leak not in found:
found[leak]=None
linehexdump(leak, onlyasc=onlyasc)
except KeyboardInterrupt:
if intr:
raise
intr=1
except KeyboardInterrupt:
pass
def fragleak2(target, timeout=0.4, onlyasc=0):
found={}
try:
while 1:
p = sr1(IP(dst=target, options="\x00"*40, proto=200)/"XXXXYYYYYYYYYYYY",timeout=timeout,verbose=0)
if not p:
continue
if conf.padding_layer in p:
leak = p[conf.padding_layer].load
if leak not in found:
found[leak]=None
linehexdump(leak,onlyasc=onlyasc)
except:
pass
conf.stats_classic_protocols += [TCP,UDP,ICMP]
conf.stats_dot11_protocols += [TCP,UDP,ICMP]
if conf.ipv6_enabled:
import scapy.layers.inet6
|
mit
|
klieret/pyplot-hierarchical-pie
|
examples/minimal_example_exploded.py
|
1
|
1837
|
#!/usr/bin/env python3
import os.path
import matplotlib
import matplotlib.pyplot as plt
from hpie import HPie, Path, stringvalues_to_pv
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(2, 2)
axs = [ax0, ax1, ax2, ax3]
fig.set_size_inches(10, 10)
# set up some random data
data = stringvalues_to_pv({
'ipsum': 40.45,
'ipsum/eirmod': 29.34,
'ipsum/eirmod/dolor': 94.4,
'lorem': 36.12,
'lorem/sadipscing/dolor': 44.32,
'lorem/sadipscing/lorem': 37.15,
'lorem/sadipscing/nonumy': 23.98,
'lorem/eirmod': 11.12,
'lorem/eirmod/lorem': 45.65,
'lorem/sadipscing': 79.67,
})
axs[0].set_title('Standard HPie')
axs[1].set_title('Completely exploded')
axs[2].set_title('Explode one slice')
axs[3].set_title('Explode multiple slices')
hps = [HPie(data, ax) for ax in axs]
# noinspection PyUnusedLocal
def wedge_gap1(path: Path):
return 0, 0.1
def wedge_gap2(path: Path):
if path == Path(("ipsum", )):
return 0, 0.2
else:
return 0, 0
def wedge_gap3(path: Path):
if path == Path(("lorem", "eirmod")):
return 0, 0.35
elif path == Path(("ipsum", )):
return 0, 0.5
elif path.startswith(Path(("lorem", ))):
return 0, 0.1
else:
return 0, 0
hps[1].wedge_spacing = wedge_gap1
hps[2].wedge_spacing = wedge_gap2
hps[3].wedge_spacing = wedge_gap3
for i, hp in enumerate(hps):
hp.format_value_text = lambda path: ""
hp.plot(setup_axes=True)
fig.tight_layout(pad=0.5)
# save/show plot
fig.savefig(os.path.join(os.path.dirname(__file__), "figures",
"{}.png".format(os.path.basename(__file__))),
dpi=100,
bbox_inches='tight')
if __name__ == "__main__":
plt.show()
|
bsd-3-clause
|
weidnem/IntroPython2016
|
students/crobison/session08/tweeter_connector.py
|
4
|
2407
|
#!/usr/bin/env python3
# Charles Robison
# Term project
import twitter
import json
import pandas as pd
import config
CONSUMER_KEY = config.CONSUMER_KEY
CONSUMER_SECRET = config.CONSUMER_SECRET
OAUTH_TOKEN = config.OAUTH_TOKEN
OAUTH_TOKEN_SECRET = config.OAUTH_TOKEN_SECRET
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
def connection_confirmation():
print("Confirming API connection...")
print(twitter_api)
'''
Geo IDs for search, good resource here:
http://www.knowbeforeyougo.co/yahooWOEIDs.cfm
'''
WORLD_WOE_ID = 1
US_WOE_ID = 23424977
world_trends = twitter_api.trends.place(_id=WORLD_WOE_ID)
world_trends_set = set([trend['name']
for trend in world_trends[0]['trends']])
# print("Printing World trends:")
# print(json.dumps(world_trends, indent=1))
us_trends = twitter_api.trends.place(_id=US_WOE_ID)
us_trends_set = set([trend['name']
for trend in us_trends[0]['trends']])
# print("Printing US trends:")
# print(json.dumps(us_trends, indent=1))
'''
Attempt to arrange objects into class
'''
# class geographies:
# WORLD_WOE_ID = 1
# US_WOE_ID = 23424977
# def global_trends():
# world_trends = twitter_api.trends.place(_id=WORLD_WOE_ID)
# world_trends_set = set([trend['name']
# for trend in world_trends[0]['trends']])
# def local_trends()
# us_trends = twitter_api.trends.place(_id=US_WOE_ID)
# us_trends_set = set([trend['name']
# for trend in us_trends[0]['trends']])
common_trends = world_trends_set.intersection(us_trends_set)
# print('Printing common trends:')
# print(common_trends)
us_trends_json = json.dumps(us_trends, indent = 1)
# print(us_trends_json)
us_trends_data = us_trends[0]['trends']
print('Printing trend object data type: ')
print(type(us_trends_data))
print()
# print(us_trends_data)
data = []
names = [i['name'] for i in us_trends_data]
data.append(names)
print('Printing US trend results as list:')
print(data)
print()
names = [d['name'] for d in us_trends_data]
print('Printing US trends and tweet volume in readable format:')
for a in us_trends_data:
print(a['name'], a['tweet_volume'])
print()
# Alternatively, creating data frame with results:
df = pd.read_json(us_trends_json)
print('Printing US trends as data frame:')
print(df.head())
|
unlicense
|
wzbozon/statsmodels
|
statsmodels/examples/ex_misc_tarma.py
|
34
|
1875
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 03 23:01:44 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
from statsmodels.tsa.arima_process import arma_generate_sample, ArmaProcess
from statsmodels.miscmodels.tmodel import TArma
from statsmodels.tsa.arima_model import ARMA
nobs = 500
ar = [1, -0.6, -0.1]
ma = [1, 0.7]
dist = lambda n: np.random.standard_t(3, size=n)
np.random.seed(8659567)
x = arma_generate_sample(ar, ma, nobs, sigma=1, distrvs=dist,
burnin=500)
mod = TArma(x)
order = (2, 1)
res = mod.fit(order=order)
res2 = mod.fit_mle(order=order, start_params=np.r_[res[0], 5, 1], method='nm')
print(res[0])
proc = ArmaProcess.from_coeffs(res[0][:order[0]], res[0][order[0]:])  # AR coeffs, then MA coeffs
print(ar, ma)
proc.nobs = nobs
# TODO: bug nobs is None, not needed ?, used in ArmaProcess.__repr__
print(proc.ar, proc.ma)
print(proc.ar_roots(), proc.ma_roots())
from statsmodels.tsa.arma_mle import Arma
modn = Arma(x)
resn = modn.fit_mle(order=order)
moda = ARMA(x, order=order)
resa = moda.fit( trend='nc')
print('\nparameter estimates')
print('ls ', res[0])
print('norm', resn.params)
print('t ', res2.params)
print('A ', resa.params)
print('\nstandard deviation of parameter estimates')
#print 'ls ', res[0] #TODO: not available yet
print('norm', resn.bse)
print('t ', res2.bse)
print('A ', resa.bse)
print('A/t-1', resa.bse / res2.bse[:3] - 1)
print('other bse')
print(resn.bsejac)
print(resn.bsejhj)
print(res2.bsejac)
print(res2.bsejhj)
print(res2.t_test(np.eye(len(res2.params))))
# TArma has no fittedvalues and resid
# TODO: check if lag is correct or if fitted `x-resid` is shifted
resid = res2.model.geterrors(res2.params)
fv = res[2]['fvec'] #resid returned from leastsq?
import matplotlib.pyplot as plt
plt.plot(x, 'o', alpha=0.5)
plt.plot(x-resid)
plt.plot(x-fv)
#plt.show()
|
bsd-3-clause
|
cython-testbed/pandas
|
pandas/plotting/_converter.py
|
1
|
39090
|
import warnings
from datetime import datetime, timedelta
import datetime as pydt
import numpy as np
from dateutil.relativedelta import relativedelta
import matplotlib.units as units
import matplotlib.dates as dates
from matplotlib.ticker import Formatter, AutoLocator, Locator
from matplotlib.transforms import nonsingular
from pandas._libs import tslibs
from pandas._libs.tslibs import resolution
from pandas.core.dtypes.common import (
is_float, is_integer,
is_integer_dtype,
is_float_dtype,
is_datetime64_ns_dtype,
is_period_arraylike,
is_nested_list_like
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.compat import lrange
import pandas.compat as compat
import pandas.core.common as com
from pandas.core.index import Index
from pandas.core.indexes.datetimes import date_range
import pandas.core.tools.datetimes as tools
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import FreqGroup
from pandas.core.indexes.period import Period, PeriodIndex
from pandas.plotting._compat import _mpl_le_2_0_0
# constants
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
MUSEC_PER_DAY = 1e6 * SEC_PER_DAY
_WARN = True # Global for whether pandas has registered the units explicitly
_mpl_units = {} # Cache for units overwritten by us
def get_pairs():
pairs = [
(tslibs.Timestamp, DatetimeConverter),
(Period, PeriodConverter),
(pydt.datetime, DatetimeConverter),
(pydt.date, DatetimeConverter),
(pydt.time, TimeConverter),
(np.datetime64, DatetimeConverter),
]
return pairs
def register(explicit=True):
"""Register Pandas Formatters and Converters with matplotlib
This function modifies the global ``matplotlib.units.registry``
dictionary. Pandas adds custom converters for
* pd.Timestamp
* pd.Period
* np.datetime64
* datetime.datetime
* datetime.date
* datetime.time
See Also
--------
deregister_matplotlib_converter
"""
# Renamed in pandas.plotting.__init__
global _WARN
if explicit:
_WARN = False
pairs = get_pairs()
for type_, cls in pairs:
converter = cls()
if type_ in units.registry:
previous = units.registry[type_]
_mpl_units[type_] = previous
units.registry[type_] = converter
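# A minimal usage sketch (the public entry point is re-exported from pandas.plotting as
# 'register_matplotlib_converters', as referenced in the warning message below):
#   >>> from pandas.plotting import register_matplotlib_converters
#   >>> register_matplotlib_converters()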
def deregister():
"""Remove pandas' formatters and converters
Removes the custom converters added by :func:`register`. This
attempts to set the state of the registry back to the state before
pandas registered its own units. Converters for pandas' own types like
Timestamp and Period are removed completely. Converters for types
pandas overwrites, like ``datetime.datetime``, are restored to their
original value.
See Also
--------
deregister_matplotlib_converters
"""
# Renamed in pandas.plotting.__init__
for type_, cls in get_pairs():
# We use type to catch our classes directly, no inheritance
if type(units.registry.get(type_)) is cls:
units.registry.pop(type_)
# restore the old keys
for unit, formatter in _mpl_units.items():
if type(formatter) not in {DatetimeConverter, PeriodConverter,
TimeConverter}:
# make it idempotent by excluding ours.
units.registry[unit] = formatter
def _check_implicitly_registered():
global _WARN
if _WARN:
msg = ("Using an implicitly registered datetime converter for a "
"matplotlib plotting method. The converter was registered "
"by pandas on import. Future versions of pandas will require "
"you to explicitly register matplotlib converters.\n\n"
"To register the converters:\n\t"
">>> from pandas.plotting import register_matplotlib_converters"
"\n\t"
">>> register_matplotlib_converters()")
warnings.warn(msg, FutureWarning)
_WARN = False
def _to_ordinalf(tm):
tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second +
float(tm.microsecond / 1e6))
return tot_sec
def time2num(d):
if isinstance(d, compat.string_types):
parsed = tools.to_datetime(d)
if not isinstance(parsed, datetime):
raise ValueError('Could not parse time {d}'.format(d=d))
return _to_ordinalf(parsed.time())
if isinstance(d, pydt.time):
return _to_ordinalf(d)
return d
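# A small worked example of the conversion above (seconds since midnight):
#   >>> time2num(pydt.time(12, 30))   # 12*3600 + 30*60
#   45000.0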
class TimeConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
valid_types = (str, pydt.time)
if (isinstance(value, valid_types) or is_integer(value) or
is_float(value)):
return time2num(value)
if isinstance(value, Index):
return value.map(time2num)
if isinstance(value, (list, tuple, np.ndarray, Index)):
return [time2num(x) for x in value]
return value
@staticmethod
def axisinfo(unit, axis):
if unit != 'time':
return None
majloc = AutoLocator()
majfmt = TimeFormatter(majloc)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='time')
@staticmethod
def default_units(x, axis):
return 'time'
# time formatter
class TimeFormatter(Formatter):
def __init__(self, locs):
self.locs = locs
def __call__(self, x, pos=0):
"""
Return the time of day as a formatted string.
Parameters
----------
x : float
The time of day specified as seconds since 00:00 (midnight),
with up to microsecond precision.
pos
Unused
Returns
-------
str
A string in HH:MM:SS.mmmuuu format. Microseconds,
milliseconds and seconds are only displayed if non-zero.
"""
fmt = '%H:%M:%S.%f'
s = int(x)
msus = int(round((x - s) * 1e6))
ms = msus // 1000
us = msus % 1000
m, s = divmod(s, 60)
h, m = divmod(m, 60)
_, h = divmod(h, 24)
if us != 0:
return pydt.time(h, m, s, msus).strftime(fmt)
elif ms != 0:
return pydt.time(h, m, s, msus).strftime(fmt)[:-3]
elif s != 0:
return pydt.time(h, m, s).strftime('%H:%M:%S')
return pydt.time(h, m).strftime('%H:%M')
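# A small worked example of the formatter above (1h 1m 1.5s past midnight; milliseconds
# are shown because they are non-zero, microseconds are trimmed):
#   >>> TimeFormatter([])(3661.5)
#   '01:01:01.500'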
# Period Conversion
class PeriodConverter(dates.DateConverter):
@staticmethod
def convert(values, units, axis):
if is_nested_list_like(values):
values = [PeriodConverter._convert_1d(v, units, axis)
for v in values]
else:
values = PeriodConverter._convert_1d(values, units, axis)
return values
@staticmethod
def _convert_1d(values, units, axis):
if not hasattr(axis, 'freq'):
raise TypeError('Axis must have `freq` set to convert to Periods')
valid_types = (compat.string_types, datetime,
Period, pydt.date, pydt.time, np.datetime64)
if (isinstance(values, valid_types) or is_integer(values) or
is_float(values)):
return get_datevalue(values, axis.freq)
if isinstance(values, PeriodIndex):
return values.asfreq(axis.freq)._ndarray_values
if isinstance(values, Index):
return values.map(lambda x: get_datevalue(x, axis.freq))
if is_period_arraylike(values):
return PeriodIndex(values, freq=axis.freq)._ndarray_values
if isinstance(values, (list, tuple, np.ndarray, Index)):
return [get_datevalue(x, axis.freq) for x in values]
return values
def get_datevalue(date, freq):
if isinstance(date, Period):
return date.asfreq(freq).ordinal
elif isinstance(date, (compat.string_types, datetime,
pydt.date, pydt.time, np.datetime64)):
return Period(date, freq).ordinal
elif (is_integer(date) or is_float(date) or
(isinstance(date, (np.ndarray, Index)) and (date.size == 1))):
return date
elif date is None:
return None
raise ValueError("Unrecognizable date '{date}'".format(date=date))
def _dt_to_float_ordinal(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if (isinstance(dt, (np.ndarray, Index, ABCSeries)
) and is_datetime64_ns_dtype(dt)):
base = dates.epoch2num(dt.asi8 / 1.0E9)
else:
base = dates.date2num(dt)
return base
# Datetime Conversion
class DatetimeConverter(dates.DateConverter):
@staticmethod
def convert(values, unit, axis):
# values might be a 1-d array, or a list-like of arrays.
_check_implicitly_registered()
if is_nested_list_like(values):
values = [DatetimeConverter._convert_1d(v, unit, axis)
for v in values]
else:
values = DatetimeConverter._convert_1d(values, unit, axis)
return values
@staticmethod
def _convert_1d(values, unit, axis):
def try_parse(values):
try:
return _dt_to_float_ordinal(tools.to_datetime(values))
except Exception:
return values
if isinstance(values, (datetime, pydt.date)):
return _dt_to_float_ordinal(values)
elif isinstance(values, np.datetime64):
return _dt_to_float_ordinal(tslibs.Timestamp(values))
elif isinstance(values, pydt.time):
return dates.date2num(values)
elif (is_integer(values) or is_float(values)):
return values
elif isinstance(values, compat.string_types):
return try_parse(values)
elif isinstance(values, (list, tuple, np.ndarray, Index, ABCSeries)):
if isinstance(values, ABCSeries):
# https://github.com/matplotlib/matplotlib/issues/11391
# Series was skipped. Convert to DatetimeIndex to get asi8
values = Index(values)
if isinstance(values, Index):
values = values.values
if not isinstance(values, np.ndarray):
values = com.asarray_tuplesafe(values)
if is_integer_dtype(values) or is_float_dtype(values):
return values
try:
values = tools.to_datetime(values)
if isinstance(values, Index):
values = _dt_to_float_ordinal(values)
else:
values = [_dt_to_float_ordinal(x) for x in values]
except Exception:
values = _dt_to_float_ordinal(values)
return values
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = PandasAutoDateLocator(tz=tz)
majfmt = PandasAutoDateFormatter(majloc, tz=tz)
datemin = pydt.date(2000, 1, 1)
datemax = pydt.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
class PandasAutoDateFormatter(dates.AutoDateFormatter):
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
dates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt)
        # matplotlib.dates._UTC has no _utcoffset attribute, which pandas needs
if self._tz is dates.UTC:
self._tz._utcoffset = self._tz.utcoffset(None)
# For mpl > 2.0 the format strings are controlled via rcparams
# so do not mess with them. For mpl < 2.0 change the second
# break point and add a musec break point
if _mpl_le_2_0_0():
self.scaled[1. / SEC_PER_DAY] = '%H:%M:%S'
self.scaled[1. / MUSEC_PER_DAY] = '%H:%M:%S.%f'
class PandasAutoDateLocator(dates.AutoDateLocator):
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
_check_implicitly_registered()
delta = relativedelta(dmax, dmin)
num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days
num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds
tot_sec = num_days * 86400. + num_sec
if abs(tot_sec) < self.minticks:
self._freq = -1
locator = MilliSecondLocator(self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
return dates.AutoDateLocator.get_locator(self, dmin, dmax)
def _get_unit(self):
return MilliSecondLocator.get_unit_generic(self._freq)
class MilliSecondLocator(dates.DateLocator):
UNIT = 1. / (24 * 3600 * 1000)
def __init__(self, tz):
dates.DateLocator.__init__(self, tz)
self._interval = 1.
def _get_unit(self):
return self.get_unit_generic(-1)
@staticmethod
def get_unit_generic(freq):
unit = dates.RRuleLocator.get_unit_generic(freq)
if unit < 0:
return MilliSecondLocator.UNIT
return unit
def __call__(self):
# if no data have been set, this will tank with a ValueError
_check_implicitly_registered()
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm) unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
nmax, nmin = dates.date2num((dmax, dmin))
num = (nmax - nmin) * 86400 * 1000
max_millis_ticks = 6
for interval in [1, 10, 50, 100, 200, 500]:
if num <= interval * (max_millis_ticks - 1):
self._interval = interval
break
else:
            # We went through the whole loop without breaking: default to
            # one second (an interval of 1000 ms)
            self._interval = 1000.
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
        if estimate > self.MAXTICKS * 2:
            # cast to int so the ':d' format spec accepts the float estimate
            raise RuntimeError(('MillisecondLocator estimated to generate '
                                '{estimate:d} ticks from {dmin} to {dmax}: '
                                'exceeds Locator.MAXTICKS'
                                '* 2 ({arg:d}) ').format(
                estimate=int(estimate), dmin=dmin, dmax=dmax,
                arg=self.MAXTICKS * 2))
freq = '%dL' % self._get_interval()
tz = self.tz.tzname(None)
st = _from_ordinal(dates.date2num(dmin)) # strip tz
ed = _from_ordinal(dates.date2num(dmax))
all_dates = date_range(start=st, end=ed,
freq=freq, tz=tz).astype(object)
try:
if len(all_dates) > 0:
locs = self.raise_if_exceeds(dates.date2num(all_dates))
return locs
except Exception: # pragma: no cover
pass
lims = dates.date2num([dmin, dmax])
return lims
def _get_interval(self):
return self._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
dmin, dmax = self.datalim_to_dt()
vmin = dates.date2num(dmin)
vmax = dates.date2num(dmax)
return self.nonsingular(vmin, vmax)
def _from_ordinal(x, tz=None):
ix = int(x)
dt = datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
microsecond = int(1e6 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute),
int(second), microsecond)
if tz is not None:
dt = dt.astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
dt += timedelta(microseconds=1e6 - microsecond)
return dt
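# Illustrative sketch (not part of the original module): ``_from_ordinal`` is
# roughly the inverse of ``matplotlib.dates.date2num`` for naive datetimes.
# The helper name ``_from_ordinal_demo`` is hypothetical.
def _from_ordinal_demo():
    d = datetime(2018, 1, 2, 3, 4, 5)
    # round-trips to datetime(2018, 1, 2, 3, 4, 5) up to float rounding
    return _from_ordinal(dates.date2num(d))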
# Fixed frequency dynamic tick locators and formatters
# -------------------------------------------------------------------------
# --- Locators ---
# -------------------------------------------------------------------------
def _get_default_annual_spacing(nyears):
"""
Returns a default spacing between consecutive ticks for annual data.
"""
if nyears < 11:
(min_spacing, maj_spacing) = (1, 1)
elif nyears < 20:
(min_spacing, maj_spacing) = (1, 2)
elif nyears < 50:
(min_spacing, maj_spacing) = (1, 5)
elif nyears < 100:
(min_spacing, maj_spacing) = (5, 10)
elif nyears < 200:
(min_spacing, maj_spacing) = (5, 25)
elif nyears < 600:
(min_spacing, maj_spacing) = (10, 50)
else:
factor = nyears // 1000 + 1
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
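# Illustrative sketch (not part of the original module): the (minor, major)
# spacings picked for a few span lengths, following the thresholds above.
# The helper name ``_annual_spacing_demo`` is hypothetical.
def _annual_spacing_demo():
    return [_get_default_annual_spacing(n) for n in (5, 30, 150)]
    # -> [(1, 1), (1, 5), (5, 25)]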
def period_break(dates, period):
"""
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor.
"""
current = getattr(dates, period)
previous = getattr(dates - 1, period)
return np.nonzero(current - previous)[0]
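# Illustrative sketch (not part of the original module): with daily periods
# crossing a month boundary, ``period_break`` returns the index of the first
# period of the new month. The helper name ``_period_break_demo`` is
# hypothetical and uses the same PeriodIndex(start=..., end=...) constructor
# as the finders below.
def _period_break_demo():
    dates_ = PeriodIndex(start='2000-01-30', end='2000-02-02', freq='D')
    return period_break(dates_, 'month')  # -> array([2])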
def has_level_label(label_flags, vmin):
"""
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
    If the minimum view limit is not an exact integer, then the first tick
label won't be shown, so we must adjust for that.
"""
if label_flags.size == 0 or (label_flags.size == 1 and
label_flags[0] == 0 and
vmin % 1 > 0.0):
return False
else:
return True
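# Illustrative sketch (not part of the original module): a single label at
# position 0 is suppressed when the view minimum is fractional. The helper
# name ``_has_level_label_demo`` is hypothetical.
def _has_level_label_demo():
    return (has_level_label(np.array([0]), vmin=0.5),      # False
            has_level_label(np.array([0, 12]), vmin=0.5))  # True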
def _daily_finder(vmin, vmax, freq):
periodsperday = -1
if freq >= FreqGroup.FR_HR:
if freq == FreqGroup.FR_NS:
periodsperday = 24 * 60 * 60 * 1000000000
elif freq == FreqGroup.FR_US:
periodsperday = 24 * 60 * 60 * 1000000
elif freq == FreqGroup.FR_MS:
periodsperday = 24 * 60 * 60 * 1000
elif freq == FreqGroup.FR_SEC:
periodsperday = 24 * 60 * 60
elif freq == FreqGroup.FR_MIN:
periodsperday = 24 * 60
elif freq == FreqGroup.FR_HR:
periodsperday = 24
else: # pragma: no cover
raise ValueError("unexpected frequency: {freq}".format(freq=freq))
periodsperyear = 365 * periodsperday
periodspermonth = 28 * periodsperday
elif freq == FreqGroup.FR_BUS:
periodsperyear = 261
periodspermonth = 19
elif freq == FreqGroup.FR_DAY:
periodsperyear = 365
periodspermonth = 28
elif resolution.get_freq_group(freq) == FreqGroup.FR_WK:
periodsperyear = 52
periodspermonth = 3
else: # pragma: no cover
raise ValueError("unexpected frequency")
# save this for later usage
vmin_orig = vmin
(vmin, vmax) = (Period(ordinal=int(vmin), freq=freq),
Period(ordinal=int(vmax), freq=freq))
span = vmax.ordinal - vmin.ordinal + 1
dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq)
# Initialize the output
info = np.zeros(span,
dtype=[('val', np.int64), ('maj', bool),
('min', bool), ('fmt', '|S20')])
info['val'][:] = dates_._ndarray_values
info['fmt'][:] = ''
info['maj'][[0, -1]] = True
# .. and set some shortcuts
info_maj = info['maj']
info_min = info['min']
info_fmt = info['fmt']
def first_label(label_flags):
if (label_flags[0] == 0) and (label_flags.size > 1) and \
((vmin_orig % 1) > 0.0):
return label_flags[1]
else:
return label_flags[0]
# Case 1. Less than a month
if span <= periodspermonth:
day_start = period_break(dates_, 'day')
month_start = period_break(dates_, 'month')
def _hour_finder(label_interval, force_year_start):
_hour = dates_.hour
_prev_hour = (dates_ - 1).hour
hour_start = (_hour - _prev_hour) != 0
info_maj[day_start] = True
info_min[hour_start & (_hour % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
if force_year_start and not has_level_label(year_start, vmin_orig):
info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y'
def _minute_finder(label_interval):
hour_start = period_break(dates_, 'hour')
_minute = dates_.minute
_prev_minute = (dates_ - 1).minute
minute_start = (_minute - _prev_minute) != 0
info_maj[hour_start] = True
info_min[minute_start & (_minute % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
def _second_finder(label_interval):
minute_start = period_break(dates_, 'minute')
_second = dates_.second
_prev_second = (dates_ - 1).second
second_start = (_second - _prev_second) != 0
info['maj'][minute_start] = True
info['min'][second_start & (_second % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[second_start & (_second %
label_interval == 0)] = '%H:%M:%S'
info_fmt[day_start] = '%H:%M:%S\n%d-%b'
info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y'
if span < periodsperday / 12000.0:
_second_finder(1)
elif span < periodsperday / 6000.0:
_second_finder(2)
elif span < periodsperday / 2400.0:
_second_finder(5)
elif span < periodsperday / 1200.0:
_second_finder(10)
elif span < periodsperday / 800.0:
_second_finder(15)
elif span < periodsperday / 400.0:
_second_finder(30)
elif span < periodsperday / 150.0:
_minute_finder(1)
elif span < periodsperday / 70.0:
_minute_finder(2)
elif span < periodsperday / 24.0:
_minute_finder(5)
elif span < periodsperday / 12.0:
_minute_finder(15)
elif span < periodsperday / 6.0:
_minute_finder(30)
elif span < periodsperday / 2.5:
_hour_finder(1, False)
elif span < periodsperday / 1.5:
_hour_finder(2, False)
elif span < periodsperday * 1.25:
_hour_finder(3, False)
elif span < periodsperday * 2.5:
_hour_finder(6, True)
elif span < periodsperday * 4:
_hour_finder(12, True)
else:
info_maj[month_start] = True
info_min[day_start] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[day_start] = '%d'
info_fmt[month_start] = '%d\n%b'
info_fmt[year_start] = '%d\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(day_start)] = '%d\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '%d\n%b\n%Y'
# Case 2. Less than three months
elif span <= periodsperyear // 4:
month_start = period_break(dates_, 'month')
info_maj[month_start] = True
if freq < FreqGroup.FR_HR:
info['min'] = True
else:
day_start = period_break(dates_, 'day')
info['min'][day_start] = True
week_start = period_break(dates_, 'week')
year_start = period_break(dates_, 'year')
info_fmt[week_start] = '%d'
info_fmt[month_start] = '\n\n%b'
info_fmt[year_start] = '\n\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(week_start)] = '\n\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '\n\n%b\n%Y'
# Case 3. Less than 14 months ...............
elif span <= 1.15 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
week_start = period_break(dates_, 'week')
info_maj[month_start] = True
info_min[week_start] = True
info_min[year_start] = False
info_min[month_start] = False
info_fmt[month_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
info_fmt[first_label(month_start)] = '%b\n%Y'
# Case 4. Less than 2.5 years ...............
elif span <= 2.5 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
month_start = period_break(dates_, 'month')
info_maj[quarter_start] = True
info_min[month_start] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
    # Case 5. Less than 4 years .................
elif span <= 4 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
info_maj[year_start] = True
info_min[month_start] = True
info_min[year_start] = False
month_break = dates_[month_start].month
jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
    # Case 6. Less than 11 years ................
elif span <= 11 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
info_maj[year_start] = True
info_min[quarter_start] = True
info_min[year_start] = False
info_fmt[year_start] = '%Y'
    # Case 7. More than 11 years ................
else:
year_start = period_break(dates_, 'year')
year_break = dates_[year_start].year
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(year_break % maj_anndef == 0)]
info_maj[major_idx] = True
minor_idx = year_start[(year_break % min_anndef == 0)]
info_min[minor_idx] = True
info_fmt[major_idx] = '%Y'
return info
def _monthly_finder(vmin, vmax, freq):
periodsperyear = 12
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
# Initialize the output
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
dates_ = info['val']
info['fmt'] = ''
year_start = (dates_ % 12 == 0).nonzero()[0]
info_maj = info['maj']
info_fmt = info['fmt']
if span <= 1.15 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = '%b\n%Y'
elif span <= 2.5 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
# TODO: Check the following : is it really info['fmt'] ?
info['fmt'][quarter_start] = True
info['min'] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 4 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 11 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
info['min'][quarter_start] = True
info_fmt[year_start] = '%Y'
else:
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
years = dates_[year_start] // 12 + 1
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%Y'
return info
def _quarterly_finder(vmin, vmax, freq):
periodsperyear = 4
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
info_maj = info['maj']
info_fmt = info['fmt']
year_start = (dates_ % 4 == 0).nonzero()[0]
if span <= 3.5 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = 'Q%q'
info_fmt[year_start] = 'Q%q\n%F'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = 'Q%q\n%F'
elif span <= 11 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[year_start] = '%F'
else:
years = dates_[year_start] // 4 + 1
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%F'
return info
def _annual_finder(vmin, vmax, freq):
(vmin, vmax) = (int(vmin), int(vmax + 1))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
(min_anndef, maj_anndef) = _get_default_annual_spacing(span)
major_idx = dates_ % maj_anndef == 0
info['maj'][major_idx] = True
info['min'][(dates_ % min_anndef == 0)] = True
info['fmt'][major_idx] = '%Y'
return info
def get_finder(freq):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
fgroup = resolution.get_freq_group(freq)
if fgroup == FreqGroup.FR_ANN:
return _annual_finder
elif fgroup == FreqGroup.FR_QTR:
return _quarterly_finder
elif freq == FreqGroup.FR_MTH:
return _monthly_finder
elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK):
return _daily_finder
else: # pragma: no cover
errmsg = "Unsupported frequency: {freq}".format(freq=freq)
raise NotImplementedError(errmsg)
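# Illustrative sketch (not part of the original module): the finder selected
# for a few common frequency aliases of this pandas vintage. The helper name
# ``_get_finder_demo`` is hypothetical.
def _get_finder_demo():
    return (get_finder('A') is _annual_finder,
            get_finder('Q') is _quarterly_finder,
            get_finder('M') is _monthly_finder,
            get_finder('D') is _daily_finder)  # -> (True, True, True, True)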
class TimeSeries_DateLocator(Locator):
"""
Locates the ticks along an axis controlled by a :class:`Series`.
Parameters
----------
    freq : {int, string}
Valid frequency specifier.
minor_locator : {False, True}, optional
Whether the locator is for minor ticks (True) or not.
dynamic_mode : {True, False}, optional
Whether the locator should work in dynamic mode.
base : {int}, optional
quarter : {int}, optional
month : {int}, optional
day : {int}, optional
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
base=1, quarter=1, month=1, day=1, plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.freq = freq
self.base = base
(self.quarter, self.month, self.day) = (quarter, month, day)
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val'])
def __call__(self):
'Return the locations of the ticks.'
# axis calls Locator.set_axis inside set_m<xxxx>_formatter
_check_implicitly_registered()
vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
vmin, vmax = vi
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.isdynamic:
locs = self._get_default_locs(vmin, vmax)
else: # pragma: no cover
base = self.base
(d, m) = divmod(vmin, base)
vmin = (d + 1) * base
locs = lrange(vmin, vmax + 1, base)
return locs
def autoscale(self):
"""
Sets the view limits to the nearest multiples of base that contain the
data.
"""
# requires matplotlib >= 0.98.0
(vmin, vmax) = self.axis.get_data_interval()
locs = self._get_default_locs(vmin, vmax)
(vmin, vmax) = locs[[0, -1]]
if vmin == vmax:
vmin -= 1
vmax += 1
return nonsingular(vmin, vmax)
# -------------------------------------------------------------------------
# --- Formatter ---
# -------------------------------------------------------------------------
class TimeSeries_DateFormatter(Formatter):
"""
Formats the ticks along an axis controlled by a :class:`PeriodIndex`.
Parameters
----------
freq : {int, string}
Valid frequency specifier.
minor_locator : {False, True}
Whether the current formatter should apply to minor ticks (True) or
major ticks (False).
dynamic_mode : {True, False}
Whether the formatter works in dynamic mode or not.
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.format = None
self.freq = freq
self.locs = []
self.formatdict = None
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _set_default_format(self, vmin, vmax):
"Returns the default ticks spacing."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']),
info)
else:
format = np.compress(info['maj'], info)
self.formatdict = {x: f for (x, _, _, f) in format}
return self.formatdict
def set_locs(self, locs):
'Sets the locations of the ticks'
# don't actually use the locs. This is just needed to work with
# matplotlib. Force to use vmin, vmax
_check_implicitly_registered()
self.locs = locs
(vmin, vmax) = vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
if vmax < vmin:
(vmin, vmax) = (vmax, vmin)
self._set_default_format(vmin, vmax)
def __call__(self, x, pos=0):
_check_implicitly_registered()
if self.formatdict is None:
return ''
else:
fmt = self.formatdict.pop(x, '')
return Period(ordinal=int(x), freq=self.freq).strftime(fmt)
class TimeSeries_TimedeltaFormatter(Formatter):
"""
Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`.
"""
@staticmethod
def format_timedelta_ticks(x, pos, n_decimals):
"""
        Convert a tick value ``x``, given in nanoseconds, to 'D days HH:MM:SS.F'
"""
s, ns = divmod(x, 1e9)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
decimals = int(ns * 10**(n_decimals - 9))
s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s))
if n_decimals > 0:
s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals)
if d != 0:
s = '{:d} days '.format(int(d)) + s
return s
def __call__(self, x, pos=0):
_check_implicitly_registered()
(vmin, vmax) = tuple(self.axis.get_view_interval())
n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin))))
if n_decimals > 9:
n_decimals = 9
return self.format_timedelta_ticks(x, pos, n_decimals)
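# Illustrative sketch (not part of the original module): formatting a
# timedelta tick supplied in nanoseconds, as on a TimedeltaIndex axis. The
# helper name ``_timedelta_ticks_demo`` is hypothetical.
def _timedelta_ticks_demo():
    x = (1 * 86400 + 2 * 3600 + 3 * 60 + 4) * 1e9 + 5e8  # 1 day 02:03:04.5
    return TimeSeries_TimedeltaFormatter.format_timedelta_ticks(x, 0, 3)
    # -> '1 days 02:03:04.500'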
|
bsd-3-clause
|
sniemi/SamPy
|
sandbox/src1/examples/custom_scale_example.py
|
1
|
6165
|
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
# Imported explicitly here so the class below does not depend on the late
# ``from pylab import *`` / ``import numpy as npy`` at the bottom of the file.
from matplotlib.ticker import Formatter, FixedLocator
import numpy as npy
import numpy.ma as ma
class MercatorLatitudeScale(mscale.ScaleBase):
"""
Scales data in range -pi/2 to pi/2 (-90 to 90 degrees) using
the system used to scale latitudes in a Mercator projection.
The scale function:
ln(tan(y) + sec(y))
The inverse scale function:
atan(sinh(y))
Since the Mercator scale tends to infinity at +/- 90 degrees,
    there is a user-defined threshold, above and below which nothing
will be plotted. This defaults to +/- 85 degrees.
source:
http://en.wikipedia.org/wiki/Mercator_projection
"""
# The scale class must have a member ``name`` that defines the
# string used to select the scale. For example,
# ``gca().set_yscale("mercator")`` would be used to select this
# scale.
name = 'mercator'
def __init__(self, axis, **kwargs):
"""
Any keyword arguments passed to ``set_xscale`` and
``set_yscale`` will be passed along to the scale's
constructor.
        thresh: The latitude threshold (in radians) above which the data are cropped.
"""
mscale.ScaleBase.__init__(self)
thresh = kwargs.pop("thresh", (85 / 180.0) * npy.pi)
if thresh >= npy.pi / 2.0:
raise ValueError("thresh must be less than pi/2")
self.thresh = thresh
def get_transform(self):
"""
Override this method to return a new instance that does the
actual transformation of the data.
The MercatorLatitudeTransform class is defined below as a
nested class of this one.
"""
return self.MercatorLatitudeTransform(self.thresh)
def set_default_locators_and_formatters(self, axis):
"""
Override to set up the locators and formatters to use with the
scale. This is only required if the scale requires custom
locators and formatters. Writing custom locators and
formatters is rather outside the scope of this example, but
there are many helpful examples in ``ticker.py``.
In our case, the Mercator example uses a fixed locator from
        -90 to 90 degrees and a custom formatter class to convert
the radians to degrees and put a degree symbol after the
value::
"""
class DegreeFormatter(Formatter):
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
return u"%d\u00b0" % ((x / npy.pi) * 180.0)
deg2rad = npy.pi / 180.0
axis.set_major_locator(FixedLocator(
npy.arange(-90, 90, 10) * deg2rad))
axis.set_major_formatter(DegreeFormatter())
axis.set_minor_formatter(DegreeFormatter())
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Override to limit the bounds of the axis to the domain of the
transform. In the case of Mercator, the bounds should be
limited to the threshold that was passed in. Unlike the
autoscaling provided by the tick locators, this range limiting
will always be adhered to, whether the axis range is set
manually, determined automatically or changed through panning
and zooming.
"""
return max(vmin, -self.thresh), min(vmax, self.thresh)
class MercatorLatitudeTransform(mtransforms.Transform):
# There are two value members that must be defined.
# ``input_dims`` and ``output_dims`` specify number of input
# dimensions and output dimensions to the transformation.
# These are used by the transformation framework to do some
# error checking and prevent incompatible transformations from
# being connected together. When defining transforms for a
# scale, which are, by definition, separable and have only one
# dimension, these members should always be set to 1.
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform(self, a):
"""
This transform takes an Nx1 ``numpy`` array and returns a
transformed copy. Since the range of the Mercator scale
is limited by the user-specified threshold, the input
array must be masked to contain only valid values.
``matplotlib`` will handle masked arrays and remove the
out-of-range data from the plot. Importantly, the
``transform`` method *must* return an array that is the
same shape as the input array, since these values need to
remain synchronized with values in the other dimension.
"""
masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a)
if masked.mask.any():
return ma.log(npy.abs(ma.tan(masked) + 1.0 / ma.cos(masked)))
else:
return npy.log(npy.abs(npy.tan(a) + 1.0 / npy.cos(a)))
def inverted(self):
"""
Override this method so matplotlib knows how to get the
inverse transform for this transform.
"""
return MercatorLatitudeScale.InvertedMercatorLatitudeTransform(self.thresh)
class InvertedMercatorLatitudeTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, thresh):
mtransforms.Transform.__init__(self)
self.thresh = thresh
def transform(self, a):
return npy.arctan(npy.sinh(a))
def inverted(self):
return MercatorLatitudeScale.MercatorLatitudeTransform(self.thresh)
# Now that the Scale class has been defined, it must be registered so
# that ``matplotlib`` can find it.
mscale.register_scale(MercatorLatitudeScale)
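# Illustrative sketch (not part of the original example): the forward and
# inverse transforms defined above are numerical inverses of each other,
# i.e. atan(sinh(ln(tan(y) + sec(y)))) == y for |y| < thresh. The helper
# name ``_mercator_roundtrip_check`` is hypothetical.
def _mercator_roundtrip_check(y=0.5, thresh=1.4):
    import numpy as np
    fwd = MercatorLatitudeScale.MercatorLatitudeTransform(thresh)
    inv = fwd.inverted()
    return np.allclose(inv.transform(fwd.transform(np.array([y]))), y)  # True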
from pylab import *
import numpy as npy
t = arange(-180.0, 180.0, 0.1)
s = t / 360.0 * npy.pi
plot(t, s, '-', lw=2)
gca().set_yscale('mercator')
xlabel('Longitude')
ylabel('Latitude')
title('Mercator: Projection of the Oppressor')
grid(True)
show()
|
bsd-2-clause
|
aarchiba/scipy
|
scipy/optimize/_lsq/least_squares.py
|
4
|
38264
|
"""Generic interface for least-square minimization."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
n = x0.size
if diff_step is None:
epsfcn = EPS
else:
epsfcn = diff_step**2
# Compute MINPACK's `diag`, which is inverse of our `x_scale` and
# ``x_scale='jac'`` corresponds to ``diag=None``.
if isinstance(x_scale, string_types) and x_scale == 'jac':
diag = None
else:
diag = 1 / x_scale
full_output = True
col_deriv = False
factor = 100.0
if jac is None:
if max_nfev is None:
# n squared to account for Jacobian evaluations.
max_nfev = 100 * n * (n + 1)
x, info, status = _minpack._lmdif(
fun, x0, (), full_output, ftol, xtol, gtol,
max_nfev, epsfcn, factor, diag)
else:
if max_nfev is None:
max_nfev = 100 * n
x, info, status = _minpack._lmder(
fun, jac, x0, (), full_output, col_deriv,
ftol, xtol, gtol, max_nfev, factor, diag)
f = info['fvec']
if callable(jac):
J = jac(x)
else:
J = np.atleast_2d(approx_derivative(fun, x))
cost = 0.5 * np.dot(f, f)
g = J.T.dot(f)
g_norm = norm(g, ord=np.inf)
nfev = info['nfev']
njev = info.get('njev', None)
status = FROM_MINPACK_TO_COMMON[status]
active_mask = np.zeros_like(x0, dtype=int)
return OptimizeResult(
x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev, status=status)
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
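# Illustrative sketch (not part of the original module): scalar bounds are
# broadcast to the number of variables ``n``. The helper name
# ``_prepare_bounds_demo`` is hypothetical.
def _prepare_bounds_demo():
    lb, ub = prepare_bounds((0, np.inf), 3)
    return lb, ub  # -> (array([0., 0., 0.]), array([inf, inf, inf]))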
def check_tolerance(ftol, xtol, gtol):
def check(tol, name):
if tol is None:
tol = 0
elif tol < EPS:
warn("Setting `{}` below the machine epsilon ({:.2e}) effectively "
"disables the corresponding termination condition."
.format(name, EPS))
return tol
ftol = check(ftol, "ftol")
xtol = check(xtol, "xtol")
gtol = check(gtol, "gtol")
if ftol < EPS and xtol < EPS and gtol < EPS:
raise ValueError("At least one of the tolerances must be higher than "
"machine epsilon ({:.2e}).".format(EPS))
return ftol, xtol, gtol
def check_x_scale(x_scale, x0):
if isinstance(x_scale, string_types) and x_scale == 'jac':
return x_scale
try:
x_scale = np.asarray(x_scale, dtype=float)
valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
except (ValueError, TypeError):
valid = False
if not valid:
raise ValueError("`x_scale` must be 'jac' or array_like with "
"positive numbers.")
if x_scale.ndim == 0:
x_scale = np.resize(x_scale, x0.shape)
if x_scale.shape != x0.shape:
raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
return x_scale
def check_jac_sparsity(jac_sparsity, m, n):
if jac_sparsity is None:
return None
if not issparse(jac_sparsity):
jac_sparsity = np.atleast_2d(jac_sparsity)
if jac_sparsity.shape != (m, n):
raise ValueError("`jac_sparsity` has wrong shape.")
return jac_sparsity, group_columns(jac_sparsity)
# Loss functions.
def huber(z, rho, cost_only):
mask = z <= 1
rho[0, mask] = z[mask]
rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
if cost_only:
return
rho[1, mask] = 1
rho[1, ~mask] = z[~mask]**-0.5
rho[2, mask] = 0
rho[2, ~mask] = -0.5 * z[~mask]**-1.5
def soft_l1(z, rho, cost_only):
t = 1 + z
rho[0] = 2 * (t**0.5 - 1)
if cost_only:
return
rho[1] = t**-0.5
rho[2] = -0.5 * t**-1.5
def cauchy(z, rho, cost_only):
rho[0] = np.log1p(z)
if cost_only:
return
t = 1 + z
rho[1] = 1 / t
rho[2] = -1 / t**2
def arctan(z, rho, cost_only):
rho[0] = np.arctan(z)
if cost_only:
return
t = 1 + z**2
rho[1] = 1 / t
rho[2] = -2 * z / t**2
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
if loss == 'linear':
return None
if not callable(loss):
loss = IMPLEMENTED_LOSSES[loss]
rho = np.empty((3, m))
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
loss(z, rho, cost_only=cost_only)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
else:
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
rho = loss(z)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
return loss_function
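# Illustrative sketch (not part of the original module): for residuals well
# inside the inlier region (|f| <= f_scale) the 'huber' loss degenerates to
# the plain squared loss, so the cost equals 0.5 * sum(f**2). The helper
# name ``_loss_function_demo`` is hypothetical.
def _loss_function_demo():
    f = np.array([0.1, -0.2, 0.3])
    loss_fun = construct_loss_function(m=f.size, loss='huber', f_scale=1.0)
    return loss_fun(f, cost_only=True), 0.5 * np.dot(f, f)  # both ~0.07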
def least_squares(
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem with bounds on the variables.
Given the residuals f(x) (an m-dimensional real function of n real
variables) and the loss function rho(s) (a scalar function), `least_squares`
finds a local minimum of the cost function F(x)::
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
subject to lb <= x <= ub
The purpose of the loss function rho(s) is to reduce the influence of
outliers on the solution.
Parameters
----------
fun : callable
Function which computes the vector of residuals, with the signature
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
respect to its first argument. The argument ``x`` passed to this
function is an ndarray of shape (n,) (never a scalar, even for n=1).
It must return a 1-d array_like of shape (m,) or a scalar. If the
argument ``x`` is complex or the function ``fun`` returns complex
residuals, it must be wrapped in a real function of real arguments,
as shown at the end of the Examples section.
x0 : array_like with shape (n,) or float
Initial guess on independent variables. If float, it will be treated
as a 1-d array with one element.
jac : {'2-point', '3-point', 'cs', callable}, optional
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]). The keywords select a finite difference scheme for numerical
estimation. The scheme '3-point' is more accurate, but requires
twice as many operations as '2-point' (default). The scheme 'cs'
uses complex steps, and while potentially the most accurate, it is
applicable only when `fun` correctly handles complex inputs and
can be analytically continued to the complex plane. Method 'lm'
always uses the '2-point' scheme. If callable, it is used as
``jac(x, *args, **kwargs)`` and should return a good approximation
(or the exact value) for the Jacobian as an array_like (np.atleast_2d
is applied), a sparse matrix or a `scipy.sparse.linalg.LinearOperator`.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must match the size of `x0` or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : {'trf', 'dogbox', 'lm'}, optional
Algorithm to perform minimization.
* 'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
* 'dogbox' : dogleg algorithm with rectangular trust regions,
typical use case is small problems with bounds. Not recommended
for problems with rank-deficient Jacobian.
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
Default is 'trf'. See Notes for more information.
ftol : float or None, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
and there was an adequate agreement between a local quadratic model and
the true model in the last step. If None, the termination by this
condition is disabled.
xtol : float or None, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8. The exact condition depends on the `method` used:
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
a trust-region radius and ``xs`` is the value of ``x``
scaled according to `x_scale` parameter (see below).
If None, the termination by this condition is disabled.
gtol : float or None, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
        The exact condition depends on the `method` used:
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
``g_scaled`` is the value of the gradient scaled to account for
the presence of the bounds [STIR]_.
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
``g_free`` is the gradient with respect to the variables which
are not in the optimal state on the boundary.
* For 'lm' : the maximum absolute value of the cosine of angles
between columns of the Jacobian and the residual vector is less
than `gtol`, or the residual vector is zero.
If None, the termination by this condition is disabled.
x_scale : array_like or 'jac', optional
Characteristic scale of each variable. Setting `x_scale` is equivalent
to reformulating the problem in scaled variables ``xs = x / x_scale``.
An alternative view is that the size of a trust region along j-th
dimension is proportional to ``x_scale[j]``. Improved convergence may
be achieved by setting `x_scale` such that a step of a given size
along any of the scaled variables has a similar effect on the cost
function. If set to 'jac', the scale is iteratively updated using the
inverse norms of the columns of the Jacobian matrix (as described in
[JJMore]_).
loss : str or callable, optional
Determines the loss function. The following keyword values are allowed:
* 'linear' (default) : ``rho(z) = z``. Gives a standard
least-squares problem.
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
approximation of l1 (absolute value) loss. Usually a good
choice for robust least squares.
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
similarly to 'soft_l1'.
        * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens the influence
          of outliers, but may cause difficulties in the optimization process.
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
a single residual, has properties similar to 'cauchy'.
If callable, it must take a 1-d ndarray ``z=f**2`` and return an
array_like with shape (3, m) where row 0 contains function values,
row 1 contains first derivatives and row 2 contains second
derivatives. Method 'lm' supports only 'linear' loss.
f_scale : float, optional
Value of soft margin between inlier and outlier residuals, default
is 1.0. The loss function is evaluated as follows
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
and ``rho`` is determined by `loss` parameter. This parameter has
no effect with ``loss='linear'``, but for other `loss` values it is
of crucial importance.
max_nfev : None or int, optional
Maximum number of function evaluations before the termination.
If None (default), the value is chosen automatically:
* For 'trf' and 'dogbox' : 100 * n.
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
otherwise (because 'lm' counts function calls in Jacobian
estimation).
diff_step : None or array_like, optional
Determines the relative step size for the finite difference
approximation of the Jacobian. The actual step is computed as
``x * diff_step``. If None (default), then `diff_step` is taken to be
a conventional "optimal" power of machine epsilon for the finite
difference scheme used [NR]_.
tr_solver : {None, 'exact', 'lsmr'}, optional
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
* 'exact' is suitable for not very large problems with dense
Jacobian matrices. The computational complexity per iteration is
comparable to a singular value decomposition of the Jacobian
matrix.
* 'lsmr' is suitable for problems with sparse and large Jacobian
matrices. It uses the iterative procedure
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
least-squares problem and only requires matrix-vector product
evaluations.
If None (default) the solver is chosen based on the type of Jacobian
returned on the first iteration.
tr_options : dict, optional
Keyword options passed to trust-region solver.
* ``tr_solver='exact'``: `tr_options` are ignored.
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
Additionally ``method='trf'`` supports 'regularize' option
(bool, default is True) which adds a regularization term to the
normal equation, which improves convergence if the Jacobian is
rank-deficient [Byrd]_ (eq. 3.4).
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
difference estimation, its shape must be (m, n). If the Jacobian has
only few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations [Curtis]_. A zero
entry means that a corresponding element in the Jacobian is identically
zero. If provided, forces the use of 'lsmr' trust-region solver.
If None (default) then dense differencing will be used. Has no effect
for 'lm' method.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
`jac`.
Returns
-------
`OptimizeResult` with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
Modified Jacobian matrix at the solution, in the sense that J^T J
is a Gauss-Newton approximation of the Hessian of the cost function.
The type is the same as the one used by the algorithm.
grad : ndarray, shape (m,)
Gradient of the cost function at the solution.
optimality : float
First-order optimality measure. In unconstrained problems, it is always
the uniform norm of the gradient. In constrained problems, it is the
quantity which was compared with `gtol` during iterations.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
Might be somewhat arbitrary for 'trf' method as it generates a sequence
of strictly feasible iterates and `active_mask` is determined within a
tolerance threshold.
nfev : int
Number of function evaluations done. Methods 'trf' and 'dogbox' do not
count function calls for numerical Jacobian approximation, as opposed
to 'lm' method.
njev : int or None
Number of Jacobian evaluations done. If numerical Jacobian
approximation is used in 'lm' method, it is set to None.
status : int
The reason for algorithm termination:
* -1 : improper input parameters status returned from MINPACK.
* 0 : the maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `ftol` termination condition is satisfied.
* 3 : `xtol` termination condition is satisfied.
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
leastsq : A legacy wrapper for the MINPACK implementation of the
        Levenberg-Marquardt algorithm.
curve_fit : Least-squares minimization applied to a curve fitting problem.
Notes
-----
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
algorithms implemented in MINPACK (lmder, lmdif). It runs the
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
    The implementation is based on the paper [JJMore]_; it is very robust and
    efficient, with a lot of smart tricks. It should be your first choice
    for unconstrained problems. Note that it doesn't support bounds and
    doesn't work when m < n.
Method 'trf' (Trust Region Reflective) is motivated by the process of
solving a system of equations, which constitute the first-order optimality
condition for a bound-constrained minimization problem as formulated in
[STIR]_. The algorithm iteratively solves trust-region subproblems
augmented by a special diagonal quadratic term and with trust-region shape
determined by the distance from the bounds and the direction of the
    gradient. These enhancements help to avoid making steps directly into the
    bounds and to explore the whole space of variables efficiently. To further
    improve convergence, the algorithm considers search directions reflected
    from the
bounds. To obey theoretical requirements, the algorithm keeps iterates
strictly feasible. With dense Jacobians trust-region subproblems are
solved by an exact method very similar to the one described in [JJMore]_
(and implemented in MINPACK). The difference from the MINPACK
implementation is that a singular value decomposition of a Jacobian
matrix is done once per iteration, instead of a QR decomposition and series
of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
approach of solving trust-region subproblems is used [STIR]_, [Byrd]_.
The subspace is spanned by a scaled gradient and an approximate
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
constraints are imposed the algorithm is very similar to MINPACK and has
    generally comparable performance. The algorithm is quite robust in
    unbounded and bounded problems, which is why it is chosen as the default
    algorithm.
Method 'dogbox' operates in a trust-region framework, but considers
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
The intersection of a current trust region and initial bounds is again
rectangular, so on each iteration a quadratic minimization problem subject
to bound constraints is solved approximately by Powell's dogleg method
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
    sparse Jacobians. The algorithm is likely to exhibit slow convergence when
    the rank of the Jacobian is less than the number of variables. The algorithm
often outperforms 'trf' in bounded problems with a small number of
variables.
Robust loss functions are implemented as described in [BA]_. The idea
is to modify a residual vector and a Jacobian matrix on each iteration
such that computed gradient and Gauss-Newton Hessian approximation match
the true gradient and Hessian approximation of the cost function. Then
the algorithm proceeds in a normal way, i.e. robust loss functions are
implemented as a simple wrapper over standard least-squares algorithms.
.. versionadded:: 0.17.0
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", Sec. 5.7.
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
solution of the trust region problem by minimization over
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
1988.
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of
Mathematics and its Applications, 13, pp. 117-120, 1974.
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
Dogleg Approach for Unconstrained and Bound Constrained
Nonlinear Optimization", WSEAS International Conference on
Applied Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
2nd edition", Chapter 4.
.. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
Proceedings of the International Workshop on Vision Algorithms:
Theory and Practice, pp. 298-372, 1999.
Examples
--------
In this example we find a minimum of the Rosenbrock function without bounds
on independent variables.
>>> def fun_rosenbrock(x):
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
Notice that we only provide the vector of the residuals. The algorithm
constructs the cost function as a sum of squares of the residuals, which
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
>>> from scipy.optimize import least_squares
>>> x0_rosenbrock = np.array([2, 2])
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
>>> res_1.x
array([ 1., 1.])
>>> res_1.cost
9.8669242910846867e-30
>>> res_1.optimality
8.8928864934219529e-14
We now constrain the variables, in such a way that the previous solution
becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
We also provide the analytic Jacobian:
>>> def jac_rosenbrock(x):
... return np.array([
... [-20 * x[0], 10],
... [-1, 0]])
Putting this all together, we see that the new solution lies on the bound:
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
... bounds=([-np.inf, 1.5], np.inf))
>>> res_2.x
array([ 1.22437075, 1.5 ])
>>> res_2.cost
0.025213093946805685
>>> res_2.optimality
1.5885401433157753e-07
Now we solve a system of equations (i.e., the cost function should be zero
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
variables:
>>> def fun_broyden(x):
... f = (3 - x) * x + 1
... f[1:] -= x[:-1]
... f[:-1] -= 2 * x[1:]
... return f
The corresponding Jacobian matrix is sparse. We tell the algorithm to
estimate it by finite differences and provide the sparsity structure of
Jacobian to significantly speed up this process.
>>> from scipy.sparse import lil_matrix
>>> def sparsity_broyden(n):
... sparsity = lil_matrix((n, n), dtype=int)
... i = np.arange(n)
... sparsity[i, i] = 1
... i = np.arange(1, n)
... sparsity[i, i - 1] = 1
... i = np.arange(n - 1)
... sparsity[i, i + 1] = 1
... return sparsity
...
>>> n = 100000
>>> x0_broyden = -np.ones(n)
...
>>> res_3 = least_squares(fun_broyden, x0_broyden,
... jac_sparsity=sparsity_broyden(n))
>>> res_3.cost
4.5687069299604613e-23
>>> res_3.optimality
1.1650454296851518e-11
    Let's also solve a curve fitting problem using a robust loss function to
take care of outliers in the data. Define the model function as
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
observation and a, b, c are parameters to estimate.
First, define the function which generates the data with noise and
outliers, define the model parameters, and generate data:
>>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
... y = a + b * np.exp(t * c)
...
... rnd = np.random.RandomState(random_state)
... error = noise * rnd.randn(t.size)
... outliers = rnd.randint(0, t.size, n_outliers)
... error[outliers] *= 10
...
... return y + error
...
>>> a = 0.5
>>> b = 2.0
>>> c = -1
>>> t_min = 0
>>> t_max = 10
>>> n_points = 15
...
>>> t_train = np.linspace(t_min, t_max, n_points)
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
Define function for computing residuals and initial estimate of
parameters.
>>> def fun(x, t, y):
... return x[0] + x[1] * np.exp(x[2] * t) - y
...
>>> x0 = np.array([1.0, 1.0, 0.0])
Compute a standard least-squares solution:
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
Now compute two solutions with two different robust loss functions. The
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
not significantly exceed 0.1 (the noise level used).
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
... args=(t_train, y_train))
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
... args=(t_train, y_train))
And finally plot all the curves. We see that by selecting an appropriate
`loss` we can get estimates close to optimal even in the presence of
strong outliers. But keep in mind that generally it is recommended to try
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
    options may cause difficulties in the optimization process.
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
>>> y_true = gen_data(t_test, a, b, c)
>>> y_lsq = gen_data(t_test, *res_lsq.x)
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
>>> y_log = gen_data(t_test, *res_log.x)
...
>>> import matplotlib.pyplot as plt
>>> plt.plot(t_train, y_train, 'o')
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
>>> plt.plot(t_test, y_lsq, label='linear loss')
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
>>> plt.plot(t_test, y_log, label='cauchy loss')
>>> plt.xlabel("t")
>>> plt.ylabel("y")
>>> plt.legend()
>>> plt.show()
In the next example, we show how complex-valued residual functions of
complex variables can be optimized with ``least_squares()``. Consider the
following function:
>>> def f(z):
... return z - (0.5 + 0.5j)
We wrap it into a function of real variables that returns real residuals
by simply handling the real and imaginary parts as independent variables:
>>> def f_wrap(x):
... fx = f(x[0] + 1j*x[1])
... return np.array([fx.real, fx.imag])
Thus, instead of the original m-dimensional complex function of n complex
variables we optimize a 2m-dimensional real function of 2n real variables:
>>> from scipy.optimize import least_squares
>>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
>>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
>>> z
(0.49999999999925893+0.49999999999925893j)
"""
if method not in ['trf', 'dogbox', 'lm']:
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
"callable.")
if tr_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
raise ValueError("`loss` must be one of {0} or a callable."
.format(IMPLEMENTED_LOSSES.keys()))
if method == 'lm' and loss != 'linear':
raise ValueError("method='lm' supports only 'linear' loss function.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_nfev is not None and max_nfev <= 0:
raise ValueError("`max_nfev` must be None or positive integer.")
if np.iscomplexobj(x0):
raise ValueError("`x0` must be real.")
x0 = np.atleast_1d(x0).astype(float)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = prepare_bounds(bounds, x0.shape[0])
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
raise ValueError("Method 'lm' doesn't support bounds.")
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if np.any(lb >= ub):
raise ValueError("Each lower bound must be strictly less than each "
"upper bound.")
if not in_bounds(x0, lb, ub):
raise ValueError("`x0` is infeasible.")
x_scale = check_x_scale(x_scale, x0)
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)
def fun_wrapped(x):
return np.atleast_1d(fun(x, *args, **kwargs))
if method == 'trf':
x0 = make_strictly_feasible(x0, lb, ub)
f0 = fun_wrapped(x0)
if f0.ndim != 1:
raise ValueError("`fun` must return at most 1-d array_like. "
"f0.shape: {0}".format(f0.shape))
if not np.all(np.isfinite(f0)):
raise ValueError("Residuals are not finite in the initial point.")
n = x0.size
m = f0.size
if method == 'lm' and m < n:
raise ValueError("Method 'lm' doesn't work when the number of "
"residuals is less than the number of variables.")
loss_function = construct_loss_function(m, loss, f_scale)
if callable(loss):
rho = loss_function(f0)
if rho.shape != (3, m):
raise ValueError("The return value of `loss` callable has wrong "
"shape.")
initial_cost = 0.5 * np.sum(rho[0])
elif loss_function is not None:
initial_cost = loss_function(f0, cost_only=True)
else:
initial_cost = 0.5 * np.dot(f0, f0)
if callable(jac):
J0 = jac(x0, *args, **kwargs)
if issparse(J0):
J0 = csr_matrix(J0)
def jac_wrapped(x, _=None):
return csr_matrix(jac(x, *args, **kwargs))
elif isinstance(J0, LinearOperator):
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs)
else:
J0 = np.atleast_2d(J0)
def jac_wrapped(x, _=None):
return np.atleast_2d(jac(x, *args, **kwargs))
else: # Estimate Jacobian by finite differences.
if method == 'lm':
if jac_sparsity is not None:
raise ValueError("method='lm' does not support "
"`jac_sparsity`.")
if jac != '2-point':
warn("jac='{0}' works equivalently to '2-point' "
"for method='lm'.".format(jac))
J0 = jac_wrapped = None
else:
if jac_sparsity is not None and tr_solver == 'exact':
raise ValueError("tr_solver='exact' is incompatible "
"with `jac_sparsity`.")
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
def jac_wrapped(x, f):
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
f0=f, bounds=bounds, args=args,
kwargs=kwargs, sparsity=jac_sparsity)
if J.ndim != 2: # J is guaranteed not sparse.
J = np.atleast_2d(J)
return J
J0 = jac_wrapped(x0, f0)
if J0 is not None:
if J0.shape != (m, n):
raise ValueError(
"The return value of `jac` has wrong shape: expected {0}, "
"actual {1}.".format((m, n), J0.shape))
if not isinstance(J0, np.ndarray):
if method == 'lm':
raise ValueError("method='lm' works only with dense "
"Jacobian matrices.")
if tr_solver == 'exact':
raise ValueError(
"tr_solver='exact' works only with dense "
"Jacobian matrices.")
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if isinstance(J0, LinearOperator) and jac_scale:
raise ValueError("x_scale='jac' can't be used when `jac` "
"returns LinearOperator.")
if tr_solver is None:
if isinstance(J0, np.ndarray):
tr_solver = 'exact'
else:
tr_solver = 'lsmr'
if method == 'lm':
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
max_nfev, x_scale, diff_step)
elif method == 'trf':
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
gtol, max_nfev, x_scale, loss_function, tr_solver,
tr_options.copy(), verbose)
elif method == 'dogbox':
if tr_solver == 'lsmr' and 'regularize' in tr_options:
warn("The keyword 'regularize' in `tr_options` is not relevant "
"for 'dogbox' method.")
tr_options = tr_options.copy()
del tr_options['regularize']
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
xtol, gtol, max_nfev, x_scale, loss_function,
tr_solver, tr_options, verbose)
result.message = TERMINATION_MESSAGES[result.status]
result.success = result.status > 0
if verbose >= 1:
print(result.message)
print("Function evaluations {0}, initial cost {1:.4e}, final cost "
"{2:.4e}, first-order optimality {3:.2e}."
.format(result.nfev, initial_cost, result.cost,
result.optimality))
return result
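# --- Editorial usage sketch (not part of SciPy) -------------------------------
# Illustrates the callable-`loss` contract checked above: the callable receives
# z = f**2 and must return an array of shape (3, m) holding rho(z), rho'(z) and
# rho''(z) row by row. `soft_l1_like` re-implements the built-in 'soft_l1' loss
# purely as a shape example; the helper names and toy residuals are illustrative.
def _callable_loss_sketch():
    def soft_l1_like(z):
        t = 1 + z
        return np.array([2 * (t ** 0.5 - 1), t ** -0.5, -0.5 * t ** -1.5])

    def residuals(x):
        # two residuals, one variable; the robust minimum lies between 3 and 5
        return np.array([x[0] - 3.0, x[0] - 5.0])

    return least_squares(residuals, x0=np.array([0.0]), loss=soft_l1_like)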
|
bsd-3-clause
|
tgsmith61591/skutil
|
skutil/preprocessing/transform.py
|
1
|
34160
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import numpy as np
import pandas as pd
from scipy import optimize
from scipy.stats import boxcox
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.externals import six
from sklearn.externals.joblib import Parallel, delayed
from sklearn.preprocessing import StandardScaler
from sklearn.utils.validation import check_is_fitted
from skutil.base import *
from ..utils import *
from ..utils.fixes import _cols_if_none
__all__ = [
'BoxCoxTransformer',
'FunctionMapper',
'InteractionTermTransformer',
'SelectiveScaler',
'SpatialSignTransformer',
'YeoJohnsonTransformer'
]
# A very small number used to measure differences.
# If the absolute difference between two numbers is
# <= EPS, it is considered equal.
EPS = 1e-12
# A very small number used to represent zero.
ZERO = 1e-16
# Helper functions:
def _eqls(lam, v):
return np.abs(lam - v) <= EPS
def _validate_rows(X):
m, n = X.shape
if m < 2:
raise ValueError('n_samples should be at least two, but got %i' % m)
class FunctionMapper(BaseSkutil, TransformerMixin):
"""Apply a function to a column or set of columns.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation.
fun : function, (default=None)
The function to apply to the feature(s). This function will be
applied via lambda expression to each column (independent of
one another). Therefore, the callable should accept an array-like
argument.
Attributes
----------
is_fit_ : bool
The ``FunctionMapper`` callable is set in the constructor,
but to remain true to the sklearn API, we need to ensure ``fit``
is called prior to ``transform``. Thus, we set this attribute in
the ``fit`` method, which performs some validation, to ensure the
``fun`` parameter has been validated.
Examples
--------
The following example will apply a cube-root transformation
to the first two columns in the iris dataset.
>>> from skutil.utils import load_iris_df
>>> import pandas as pd
>>> import numpy as np
>>>
>>> X = load_iris_df(include_tgt=False)
>>>
>>> # define the function
>>> def cube_root(x):
... return np.power(x, 0.333)
>>>
>>> # make our transformer
>>> trans = FunctionMapper(cols=X.columns[:2], fun=cube_root)
>>> trans.fit_transform(X).head()
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
0 1.720366 1.517661 1.4 0.2
1 1.697600 1.441722 1.4 0.2
2 1.674205 1.473041 1.3 0.2
3 1.662258 1.457550 1.5 0.2
4 1.709059 1.531965 1.4 0.2
"""
def __init__(self, cols=None, fun=None, **kwargs):
super(FunctionMapper, self).__init__(cols=cols)
self.fun = fun
self.kwargs = kwargs
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# Check this second in this case
X, self.cols = validate_is_pd(X, self.cols)
# validate the function. If none, make it a passthrough
if not self.fun:
def pass_through(x):
return x
self.fun = pass_through
else:
# check whether is function
if not hasattr(self.fun, '__call__'):
raise ValueError('passed fun arg is not a function')
# since we aren't checking is fit, we should set
# an arbitrary value to show validation has already occurred
self.is_fit_ = True
# TODO: this might cause issues in de-pickling, as we're
# going to be pickling a non-instance method... solve this.
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
check_is_fitted(self, 'is_fit_')
X, _ = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
# apply the function
# TODO: do we want to change the behavior to where the function
# should accept an entire frame and not a series?
X[cols] = X[cols].apply(lambda x: self.fun(x, **self.kwargs))
return X
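# --- Editorial usage sketch (not part of skutil) ------------------------------
# The class docstring shows a one-argument function; this sketch exercises the
# **kwargs path used in `transform`, where keyword arguments passed to the
# constructor are forwarded to `fun` for every selected column. The helper
# names below are illustrative only; `load_iris_df` is the same utility used
# in the docstring example.
def _function_mapper_kwargs_sketch():
    from skutil.utils import load_iris_df

    def scaled_power(col, power=1.0, scale=1.0):
        return scale * np.power(col, power)

    X = load_iris_df(include_tgt=False)
    trans = FunctionMapper(cols=X.columns[:2], fun=scaled_power,
                           power=0.5, scale=10.0)
    return trans.fit_transform(X).head()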
def _mul(a, b):
"""Multiplies two series objects
(no validation since internally used).
Parameters
----------
a : Pandas ``Series``
One of two Pandas ``Series`` objects that will
be interacted together.
b : Pandas ``Series``
One of two Pandas ``Series`` objects that will
be interacted together.
Returns
-------
product : np.ndarray
"""
return (a * b).values
class InteractionTermTransformer(BaseSkutil, TransformerMixin):
"""A class that will generate interaction terms between selected columns.
An interaction captures some relationship between two independent variables
in the form of In = (xi * xj).
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
as_df : bool, optional (default=True)
Whether to return a Pandas ``DataFrame`` in the ``transform``
method. If False, will return a Numpy ``ndarray`` instead.
Since most skutil transformers depend on explicitly-named
``DataFrame`` features, the ``as_df`` parameter is True by default.
interaction : callable, optional (default=None)
A callable for interactions. Default None will
result in multiplication of two Series objects
name_suffix : str, optional (default='I')
The suffix to add to the new feature name in the form of
<feature_x>_<feature_y>_<suffix>
only_return_interactions : bool, optional (default=False)
If set to True, will only return features in feature_names
and their respective generated interaction terms.
Attributes
----------
fun_ : callable
The interaction term function
Examples
--------
The following example interacts the first two columns of the iris
dataset using the default ``_mul`` function (product).
>>> from skutil.preprocessing import InteractionTermTransformer
>>> from skutil.utils import load_iris_df
>>> import pandas as pd
>>>
>>> X = load_iris_df(include_tgt=False)
>>>
>>> trans = InteractionTermTransformer(cols=X.columns[:2])
>>> X_transform = trans.fit_transform(X)
>>>
>>> assert X_transform.shape[1] == X.shape[1] + 1 # only added one column
>>> X_transform[X_transform.columns[-1]].head()
0 17.85
1 14.70
2 15.04
3 14.26
4 18.00
Name: sepal length (cm)_sepal width (cm)_I, dtype: float64
"""
def __init__(self, cols=None, as_df=True, interaction_function=None,
name_suffix='I', only_return_interactions=False):
super(InteractionTermTransformer, self).__init__(cols=cols, as_df=as_df)
self.interaction_function = interaction_function
self.name_suffix = name_suffix
self.only_return_interactions = only_return_interactions
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
X, self.cols = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
self.fun_ = self.interaction_function if self.interaction_function is not None else _mul
# validate function
if not hasattr(self.fun_, '__call__'):
raise TypeError('require callable for interaction_function')
# validate cols
if len(cols) < 2:
raise ValueError('need at least two columns')
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
check_is_fitted(self, 'fun_')
X, _ = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
n_features = len(cols)
suff = self.name_suffix
fun = self.fun_
append_dict = {}
interaction_names = [x for x in cols]
# we can do this in N^2 or we can do it in the uglier N choose 2...
for i in range(n_features - 1):
for j in range(i + 1, n_features):
col_i, col_j = cols[i], cols[j]
new_nm = '%s_%s_%s' % (col_i, col_j, suff)
append_dict[new_nm] = fun(X[col_i], X[col_j])
interaction_names.append(new_nm)
# create DF 2:
df2 = pd.DataFrame.from_dict(append_dict)
X = pd.concat([X, df2], axis=1)
# if we only want to keep interaction names, filter now
X = X if not self.only_return_interactions else X[interaction_names]
# return matrix if needed
return X if self.as_df else X.as_matrix()
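# --- Editorial usage sketch (not part of skutil) ------------------------------
# The docstring example relies on the default product interaction (`_mul`);
# this sketch passes a custom `interaction_function`, which only needs to
# accept two pandas Series and return array-like values. The `ratio` helper
# and the 'R' suffix are illustrative choices, not library defaults.
def _custom_interaction_sketch():
    from skutil.utils import load_iris_df

    def ratio(a, b):
        # element-wise ratio instead of the default element-wise product
        return (a / b).values

    X = load_iris_df(include_tgt=False)
    trans = InteractionTermTransformer(cols=X.columns[:2],
                                       interaction_function=ratio,
                                       name_suffix='R')
    return trans.fit_transform(X).head()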
class SelectiveScaler(BaseSkutil, TransformerMixin):
"""A class that will apply scaling only to a select group
of columns. Useful for data that may contain features that should not
be scaled, such as those that have been dummied, or for any already-in-scale
features. Perhaps, even, there are some features you'd like to scale in
a different manner than others. This, then, allows two back-to-back
``SelectiveScaler`` instances with different columns & strategies in a
pipeline object.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
scaler : instance of a sklearn Scaler, optional (default=StandardScaler)
The scaler to fit against ``cols``. Must implement the sklearn scaler
interface (``fit``/``transform``), e.g. ``sklearn.preprocessing.StandardScaler``.
as_df : bool, optional (default=True)
Whether to return a Pandas ``DataFrame`` in the ``transform``
method. If False, will return a Numpy ``ndarray`` instead.
Since most skutil transformers depend on explicitly-named
``DataFrame`` features, the ``as_df`` parameter is True by default.
Attributes
----------
is_fit_ : bool
The ``SelectiveScaler`` parameter ``scaler`` is set in the constructor,
but to remain true to the sklearn API, we need to ensure ``fit``
is called prior to ``transform``. Thus, we set this attribute in
the ``fit`` method, which performs some validation, to ensure the
``scaler`` parameter has been validated.
Examples
--------
The following example will scale only the first two features
in the iris dataset:
>>> from skutil.preprocessing import SelectiveScaler
>>> from skutil.utils import load_iris_df
>>> import pandas as pd
>>> import numpy as np
>>>
>>> X = load_iris_df(include_tgt=False)
>>>
>>> trans = SelectiveScaler(cols=X.columns[:2])
>>> X_transform = trans.fit_transform(X)
>>>
>>> X_transform.head()
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
0 -0.900681 1.032057 1.4 0.2
1 -1.143017 -0.124958 1.4 0.2
2 -1.385353 0.337848 1.3 0.2
3 -1.506521 0.106445 1.5 0.2
4 -1.021849 1.263460 1.4 0.2
"""
def __init__(self, cols=None, scaler=StandardScaler(), as_df=True):
super(SelectiveScaler, self).__init__(cols=cols, as_df=as_df)
self.scaler = scaler
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# check on state of X and cols
X, self.cols = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
# throws exception if the cols don't exist
self.scaler.fit(X[cols])
# this is our fit param
self.is_fit_ = True
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
# check on state of X and cols
X, _ = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
# Fails through if cols don't exist or if the scaler isn't fit yet
X[cols] = self.scaler.transform(X[cols])
return X if self.as_df else X.as_matrix()
class BoxCoxTransformer(BaseSkutil, TransformerMixin):
"""Estimate a lambda parameter for each feature, and transform
it to a distribution more-closely resembling a Gaussian bell
using the Box-Cox transformation.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
n_jobs : int, 1 by default
The number of jobs to use for the computation. This works by
estimating each of the feature lambdas in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but
one are used.
as_df : bool, optional (default=True)
Whether to return a Pandas ``DataFrame`` in the ``transform``
method. If False, will return a Numpy ``ndarray`` instead.
Since most skutil transformers depend on explicitly-named
``DataFrame`` features, the ``as_df`` parameter is True by default.
shift_amt : float, optional (default=1e-6)
Since the Box-Cox transformation requires that all values be positive
(above zero), any features that contain sub-zero elements will be shifted
up by the absolute value of the minimum element plus this amount in the ``fit``
method. In the ``transform`` method, if any of the test data is less than zero
after shifting, it will be truncated at the ``shift_amt`` value.
Attributes
----------
shift_ : dict
The shifts for each feature needed to shift the min value in
the feature up to at least 0.0, as every element must be positive
lambda_ : dict
The lambda values corresponding to each feature
"""
def __init__(self, cols=None, n_jobs=1, as_df=True, shift_amt=1e-6):
super(BoxCoxTransformer, self).__init__(cols=cols, as_df=as_df)
self.n_jobs = n_jobs
self.shift_amt = shift_amt
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# check on state of X and cols
X, self.cols = validate_is_pd(X, self.cols, assert_all_finite=True) # creates a copy -- we need all to be finite
cols = _cols_if_none(X, self.cols)
# ensure enough rows
_validate_rows(X)
# First step is to compute all the shifts needed, then add back to X...
min_Xs = X[cols].min(axis=0)
shift = np.array([np.abs(x) + self.shift_amt if x <= 0.0 else 0.0 for x in min_Xs])
X[cols] += shift
# now put shift into a dict
self.shift_ = dict(zip(cols, shift))
# Now estimate the lambdas in parallel
self.lambda_ = dict(zip(cols,
Parallel(n_jobs=self.n_jobs)(
delayed(_estimate_lambda_single_y)
(X[i].tolist()) for i in cols)))
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
check_is_fitted(self, 'shift_')
# check on state of X and cols
X, _ = validate_is_pd(X, self.cols, assert_all_finite=True)
cols = _cols_if_none(X, self.cols)
_, n_features = X.shape
lambdas_, shifts_ = self.lambda_, self.shift_
# Add the shifts in, and if they're too low,
# we have to truncate at some low value: 1e-6
for nm in cols:
X[nm] += shifts_[nm]
# If the shifts are too low, truncate...
X = X.apply(lambda x: x.apply(lambda y: np.maximum(self.shift_amt, y)))
# do transformations
for nm in cols:
X[nm] = _transform_y(X[nm].tolist(), lambdas_[nm])
return X if self.as_df else X.as_matrix()
def _transform_y(y, lam):
"""Transform a single y, given a single lambda value.
No validation performed.
Parameters
----------
y : array_like, shape (n_samples,)
The vector being transformed
lam : scalar
The lambda value used for the transformation
"""
# ensure np array
y = np.array(y)
y_prime = np.array([(np.power(x, lam) - 1) / lam if not _eqls(lam, ZERO) else log(x) for x in y])
# rarely -- very rarely -- we can get a NaN. Why?
return y_prime
def _estimate_lambda_single_y(y):
"""Estimate lambda for a single y, given a range of lambdas
through which to search. No validation performed.
Parameters
----------
y : ndarray, shape (n_samples,)
The vector being estimated against
"""
# ensure is array
y = np.array(y)
# Use scipy's log-likelihood estimator
b = boxcox(y, lmbda=None)
# Return lambda corresponding to maximum P
return b[1]
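# --- Editorial usage sketch (not part of skutil) ------------------------------
# BoxCoxTransformer defines no Examples section of its own, so this is a
# minimal, assumed usage pattern mirroring the other transformers in this
# module: fit per-column lambdas on strictly positive data, then inspect the
# fitted `lambda_` and `shift_` dicts.
def _boxcox_usage_sketch():
    from skutil.utils import load_iris_df

    X = load_iris_df(include_tgt=False)
    trans = BoxCoxTransformer(cols=X.columns[:2])
    X_bc = trans.fit_transform(X)
    return trans.lambda_, trans.shift_, X_bc.head()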
class YeoJohnsonTransformer(BaseSkutil, TransformerMixin):
"""Estimate a lambda parameter for each feature, and transform
it to a distribution more-closely resembling a Gaussian bell
using the Yeo-Johnson transformation.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
n_jobs : int, 1 by default
The number of jobs to use for the computation. This works by
estimating each of the feature lambdas in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but
one are used.
as_df : bool, optional (default=True)
Whether to return a Pandas ``DataFrame`` in the ``transform``
method. If False, will return a Numpy ``ndarray`` instead.
Since most skutil transformers depend on explicitly-named
``DataFrame`` features, the ``as_df`` parameter is True by default.
Attributes
----------
lambda_ : dict
The lambda values corresponding to each feature
"""
def __init__(self, cols=None, n_jobs=1, as_df=True):
super(YeoJohnsonTransformer, self).__init__(cols=cols, as_df=as_df)
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# check on state of X and cols
X, self.cols = validate_is_pd(X, self.cols, assert_all_finite=True) # creates a copy -- we need all to be finite
cols = _cols_if_none(X, self.cols)
# ensure enough rows
_validate_rows(X)
# Now estimate the lambdas in parallel
self.lambda_ = dict(zip(cols,
Parallel(n_jobs=self.n_jobs)(
delayed(_yj_estimate_lambda_single_y)
(X[nm]) for nm in cols)))
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
check_is_fitted(self, 'lambda_')
# check on state of X and cols
X, cols = validate_is_pd(X, self.cols, assert_all_finite=True) # creates a copy -- we need all to be finite
cols = _cols_if_none(X, self.cols)
lambdas_ = self.lambda_
# do transformations
for nm in cols:
X[nm] = _yj_transform_y(X[nm], lambdas_[nm])
return X if self.as_df else X.as_matrix()
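# --- Editorial usage sketch (not part of skutil) ------------------------------
# Unlike Box-Cox, the Yeo-Johnson transform handles zero and negative values
# directly, so no shift is estimated and only per-column lambdas are stored.
# A minimal, assumed usage pattern:
def _yeo_johnson_usage_sketch():
    from skutil.utils import load_iris_df

    X = load_iris_df(include_tgt=False)
    trans = YeoJohnsonTransformer(cols=X.columns[:2])
    X_yj = trans.fit_transform(X)
    return trans.lambda_, X_yj.head()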
def _yj_trans_single_x(x, lam):
if x >= 0:
# Case 1: x >= 0 and lambda is not 0
if not _eqls(lam, ZERO):
return (np.power(x + 1, lam) - 1.0) / lam
# Case 2: x >= 0 and lambda is zero
return log(x + 1)
else:
# Case 3: x < 0 and lambda is not two
if not lam == 2.0:
denom = 2.0 - lam
numer = np.power((-x + 1), (2.0 - lam)) - 1.0
return -numer / denom
# Case 4: x < 0 and lambda is two
return -log(-x + 1)
def _yj_transform_y(y, lam):
"""Transform a single y, given a single lambda value.
No validation performed.
Parameters
----------
y : ndarray, shape (n_samples,)
The vector being transformed
lam : scalar
The lambda value used for the transformation
"""
y = np.array(y)
return np.array([_yj_trans_single_x(x, lam) for x in y])
def _yj_estimate_lambda_single_y(y):
"""Estimate lambda for a single y, given a range of lambdas
through which to search. No validation performed.
Parameters
----------
y : ndarray, shape (n_samples,)
The vector being estimated against
"""
y = np.array(y)
# Use custom log-likelihood estimator
return _yj_normmax(y)
def _yj_normmax(x, brack=(-2, 2)):
"""Compute optimal YJ transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple
The starting interval for a downhill bracket search
"""
# Use MLE to compute the optimal YJ parameter
def _mle_opt(i, brck):
def _eval_mle(lmb, data):
# Function to minimize
return -_yj_llf(data, lmb)
return optimize.brent(_eval_mle, brack=brck, args=(i,))
return _mle_opt(x, brack) # _mle(x, brack)
def _yj_llf(data, lmb):
"""Transform a y vector given a single lambda value,
and compute the log-likelihood function. No validation
is applied to the input.
Parameters
----------
data : array_like
The vector to transform
lmb : scalar
The lambda value
"""
data = np.asarray(data)
N = data.shape[0]
y = _yj_transform_y(data, lmb)
# We can't take the canonical log of data, as there could be
# zeros or negatives. Thus, we need to shift both distributions
# up by some arbitrary factor just for the LLF computation
min_d, min_y = np.min(data), np.min(y)
if min_d < ZERO:
shift = np.abs(min_d) + 1
data += shift
# Same goes for Y
if min_y < ZERO:
shift = np.abs(min_y) + 1
y += shift
# Compute mean on potentially shifted data
y_mean = np.mean(y, axis=0)
var = np.sum((y - y_mean) ** 2. / N, axis=0)
# If var is 0.0, we'll get a warning. Means all the
# values were nearly identical in y, so we will return
# NaN so we don't optimize for this value of lam
if 0 == var:
return np.nan
# Can't use the canonical log because of possible negatives, so use the truncated log function in utils
llf = (lmb - 1) * np.sum(log(data), axis=0)
llf -= N / 2.0 * log(var)
return llf
class SpatialSignTransformer(BaseSkutil, TransformerMixin):
"""Project the feature space of a matrix into a multi-dimensional sphere
by dividing each feature by its squared norm.
Parameters
----------
cols : array_like, shape=(n_features,), optional (default=None)
The names of the columns on which to apply the transformation.
If no column names are provided, the transformer will be ``fit``
on the entire frame. Note that the transformation will also only
apply to the specified columns, and any other non-specified
columns will still be present after transformation. Note that since
this transformer can only operate on numeric columns, not explicitly
setting the ``cols`` parameter may result in errors for categorical data.
n_jobs : int, 1 by default
The number of jobs to use for the computation. This works by
estimating each of the feature lambdas in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but
one are used.
as_df : bool, optional (default=True)
Whether to return a Pandas ``DataFrame`` in the ``transform``
method. If False, will return a Numpy ``ndarray`` instead.
Since most skutil transformers depend on explicitly-named
``DataFrame`` features, the ``as_df`` parameter is True by default.
Attributes
----------
sq_nms_ : dict
The squared norms for each feature
"""
def __init__(self, cols=None, n_jobs=1, as_df=True):
super(SpatialSignTransformer, self).__init__(cols=cols, as_df=as_df)
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Fit the transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to fit. The frame will only
be fit on the prescribed ``cols`` (see ``__init__``) or
all of them if ``cols`` is None. Furthermore, ``X`` will
not be altered in the process of the fit.
y : None
Passthrough for ``sklearn.pipeline.Pipeline``. Even
if explicitly set, will not change behavior of ``fit``.
Returns
-------
self
"""
# check on state of X and cols
X, self.cols = validate_is_pd(X, self.cols)
cols = _cols_if_none(X, self.cols)
# Now get sqnms in parallel
self.sq_nms_ = dict(zip(cols,
Parallel(n_jobs=self.n_jobs)(
delayed(_sq_norm_single)
(X[nm]) for nm in cols)))
return self
def transform(self, X):
"""Transform a test matrix given the already-fit transformer.
Parameters
----------
X : Pandas ``DataFrame``
The Pandas frame to transform. The operation will
be applied to a copy of the input data, and the result
will be returned.
Returns
-------
X : Pandas ``DataFrame``
The operation is applied to a copy of ``X``,
and the result set is returned.
"""
check_is_fitted(self, 'sq_nms_')
# check on state of X and cols
X, _ = validate_is_pd(X, self.cols)
sq_nms_ = self.sq_nms_
# scale by norms
for nm, the_norm in six.iteritems(sq_nms_):
X[nm] /= the_norm
return X if self.as_df else X.as_matrix()
def _sq_norm_single(x, zero_action=np.inf):
x = np.asarray(x)
nrm = np.dot(x, x)
# What if a squared norm is zero? We want to
# avoid a divide-by-zero situation...
return nrm if not nrm == 0 else zero_action
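# --- Editorial usage sketch (not part of skutil) ------------------------------
# Minimal, assumed usage of SpatialSignTransformer: each fitted column is
# divided by its squared norm computed in `fit`, and a zero squared norm is
# replaced with np.inf so the division maps that column to zero.
def _spatial_sign_usage_sketch():
    from skutil.utils import load_iris_df

    X = load_iris_df(include_tgt=False)
    trans = SpatialSignTransformer(cols=X.columns[:2])
    X_ss = trans.fit_transform(X)
    return trans.sq_nms_, X_ss.head()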
|
bsd-3-clause
|
krafczyk/spack
|
var/spack/repos/builtin/packages/py-biom-format/package.py
|
2
|
2316
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBiomFormat(PythonPackage):
"""The BIOM file format (canonically pronounced biome) is designed to be
a general-use format for representing biological sample by observation
contingency tables."""
homepage = "https://pypi.python.org/pypi/biom-format/2.1.6"
url = "https://pypi.io/packages/source/b/biom-format/biom-format-2.1.6.tar.gz"
version('2.1.6', '1dd4925b74c56e8ee864d5e1973068de')
variant('h5py', default=True, description='For use with BIOM 2.0+ files')
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-cython', type='build')
depends_on('py-h5py', type=('build', 'run'), when='+h5py')
depends_on('py-click', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-pyqi', type=('build', 'run'))
|
lgpl-2.1
|
danieldmm/minerva
|
evaluation/results_analysis.py
|
1
|
12728
|
#-------------------------------------------------------------------------------
# Name: results_analysis
# Purpose: Contains graphing and analysis functions to look at results
#
# Author: dd
#
# Created: 12/02/2015
# Copyright: (c) dd 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import print_function
import glob
import pandas
from pandas import Series, DataFrame
from scipy.stats.mstats import mode
import matplotlib.pyplot as plt
import seaborn as sns
import db.corpora as cp
from proc.general_utils import ensureTrailingBackslash, getFileDir, getFileName
from six.moves import range
##from testingPipeline5 import measureCitationResolution, AZ_ZONES_LIST, CORESC_LIST
def generateEqualWeights():
# NOTE: INITIAL_VALUE, all_doc_methods and method are not defined in this module.
weights={x:INITIAL_VALUE for x in all_doc_methods[method]["runtime_parameters"]}
return weights
def statsOnResults(data, metric="avg_mrr"):
"""
Returns the mean of the top rows that share the same (top) value of ``metric``
"""
res={}
if len(data)==0:
return
val_to_match=data[metric].iloc[0]
index=0
while data[metric].iloc[index]==val_to_match:
index+=1
lines=data.iloc[:index]
means=lines.mean()
print("Averages:")
for zone in AZ_ZONES_LIST:
res[zone]=means[zone]
print(zone,":",means[zone])
return res
def drawSimilaritiesGraph(filename,metric, smooth_graph=False):
"""
Draws graph from CSV file
"""
dir=getFileDir(filename)
if dir=="":
filename=cp.Corpus.dir_output+filename
print("Drawing graph for",filename)
data=pandas.read_csv(filename)
## columns=AZ_ZONES_LIST+[metric]
columns=[u"ilc_CSC_"+zone for zone in CORESC_LIST]+[metric]
if columns[0] not in data.columns:
columns=[zone for zone in CORESC_LIST]+[metric]
if columns[0] not in data.columns:
columns=[zone for zone in AZ_ZONES_LIST]+[metric]
## columns=["OWN","OTH","CTR","BKG","AIM"]+[metric]
## columns=["OWN","OTH","CTR"]+[metric]
## f = lambda x: mode(x, axis=None)[0]
## print data[columns].head(20).apply(f)
## print data.describe()
numrows=data.shape[0] # (y,x)
## print data[columns].head(10).mean()
data=data.sort(metric, ascending=False)
# smoothing function
rows_to_group=100
## rows_to_group=numrows/700
f=lambda x:100-(x/rows_to_group)
results=[]
## index=0
## while index < numrows:
## means=data[columns].iloc[index:index+rows_to_group].mean()
#### print means[metric]
## index+=rows_to_group
## results.append(means)
## results.reverse()
if smooth_graph:
df=data[columns].groupby([f], sort=True)
results=df[columns].mean()
else:
data["g_"+metric]=data[metric]
results=data.groupby(["g_"+metric])[columns].mean()
## colors = ["windows blue", "amber", "greyish", "faded green", "dusty purple", "baby pink"]
## sns.palplot(sns.xkcd_palette(colors))
## flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71", "#002354", ""]
## sns.palplot(sns.color_palette(flatui))
## print sns.color_palette("Set2")
## sns.set_style("white")
sns.set_style("whitegrid")
## sns.set_style("whitegrid", {"grid.linewidth": .5})
## sns.set_context("talk")
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5})
## sns.set_palette(sns.color_palette("hls", 7))
## sns.set_palette(sns.color_palette("husl", 7))
## sns.set_palette("bright",n_colors=14,desat=0.9)
sns.set_palette(sns.color_palette("gist_rainbow", 11),11,0.9)
## sns.palplot(sns.color_palette())
## sns.palplot(sns.color_palette(n_colors=14) )
results_data=DataFrame(results)
## results_data.plot(x=metric, kind="line")
results_data[columns].plot(title=filename,fontsize=20, x=metric)
sns.despine()
def compareResultsBetweenSimilarities(filenames):
"""
Loads a CSV of parameter-based results, compares selected measures by
re-running them with a different similarity, and outputs the results
"""
metric="precision_total"
## metric="avg_mrr"
for filename in filenames:
## data['precision_2'] = Series(0, index=data.index)
drawSimilaritiesGraph(filename,metric, True)
plt.show()
## plt.savefig()
def saveGraphForResults(filename,metric):
"""
"""
dir=ensureTrailingBackslash(getFileDir(filename))
drawSimilaritiesGraph(filename,metric,True)
name=getFileName(filename)
plt.savefig(dir+name+'.png', bbox_inches='tight')
plt.close()
def computeOverlap(filename, overlap_in="rank", overlap_between=["az_annotated_1_ALL","az_annotated_1_ALL_EXPLAIN"]):
"""
"""
data=pandas.read_csv(cp.Corpus.dir_output+filename)
## data['precision_2'] = Series(0, index=data.index)
## data=DataFrame(self.overall_results)
print(data.describe())
group=data.groupby(["file_guid","citation_id"])
all_overlaps=[]
for a,b in group:
numitems=b.shape[0]
results=[]
for i in range(numitems):
doc_method=b.iloc[i]["doc_method"]
rank=b.iloc[i][overlap_in]
if doc_method in overlap_between:
results.append(rank)
this_one=1
for index in range(len(results)-1):
if results[index] != results[index+1]:
this_one=0
break
all_overlaps.append(this_one)
print("Overlap between", overlap_between," in ",overlap_in,": %02.4f" % (sum(all_overlaps) / float(len(all_overlaps))))
def drawGraphOfScorePerMethod(data):
"""
"""
# !TODO IMPLEMENT
columns=[]
print(data.describe())
numrows=data.shape[0] # (y,x)
print(data[columns].head(10).mean())
data=data.sort(metric, ascending=False)
rows_to_group=100
f=lambda x:100-(x/rows_to_group)
results=[]
## index=0
## while index < numrows:
## means=data[columns].iloc[index:index+rows_to_group].mean()
#### print means[metric]
## index+=rows_to_group
## results.append(means)
## results.reverse()
## data["g_"+metric]=data[metric]
## results=data.groupby(["g_"+metric])[columns].mean()
results=data[columns].groupby([f], sort=True)[columns].mean()
sns.set_style("whitegrid")
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5})
sns.set_palette("bright",8,0.9)
results_data=DataFrame(results)
## results_data.plot(x=metric, kind="line")
results_data[columns].plot(title=filename,fontsize=20, x=metric)
sns.despine()
def compareResultsBetweenMethods(filename, metric="precision_total"):
"""
"""
# !TODO IMPLEMENT
data=pandas.read_csv(cp.Corpus.dir_output+filename)
## metric="avg_mrr"
for filename in filenames:
## data['precision_2'] = Series(0, index=data.index)
drawSimilaritiesGraph(filename,metric)
plt.show()
def drawNewSimilaritiesGraph(filename, metric, single_out="OWN", smooth_graph=False):
"""
Draws graph from CSV file
"""
data=pandas.read_csv(cp.Corpus.dir_output+filename)
columns=AZ_ZONES_LIST+[metric]
## f = lambda x: mode(x, axis=None)[0]
## print data[columns].head(20).apply(f)
for zone in AZ_ZONES_LIST:
data["pct_"+zone]=data[zone]/data[AZ_ZONES_LIST].sum(axis=1)
columns.append("pct_"+zone)
print(data.describe())
numrows=data.shape[0] # (y,x)
print(data[columns].head(10).mean())
data=data.sort(metric, ascending=False)
# smoothing function
rows_to_group=100
f=lambda x:100-(x/rows_to_group)
results=[]
if smooth_graph:
results=data[columns].groupby([f], sort=True)[columns].mean()
else:
data["g_"+metric]=data[metric]
results=data.groupby(["g_"+metric])[columns].mean()
sns.set_style("whitegrid")
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5})
sns.set_palette("bright",8,0.9)
results_data=DataFrame(results)
## results_data.plot(x=metric, kind="line")
ax=results_data[["pct_"+single_out,metric]].plot(title=filename,fontsize=20, y=metric, x="pct_"+single_out)
ax.set_ylabel(metric)
sns.despine()
def compareNewSimilaritiesGraph(filenames):
"""
Loads a CSV of parameter-based results, compares selected measures by
re-running them with a different similarity, and outputs the results
"""
metric="precision_total"
## metric="avg_mrr"
for filename in filenames:
## data['precision_2'] = Series(0, index=data.index)
drawNewSimilaritiesGraph(filename,metric, "BKG",True)
plt.show()
def makeAllGraphsForExperiment(exp_dir):
"""
Iterates through all weight*.csv files in the experiment's directory and
saves a graph for each
"""
## metric="avg_mrr"
## metric="precision_total"
metric="avg_precision"
exp_dir=ensureTrailingBackslash(exp_dir)
## working_dir=Corpus.dir_experiments+exp_name+os.sep
for path in glob.glob(exp_dir+"weight*.csv"):
saveGraphForResults(path,metric)
def drawWeights(exp,weights,name):
"""
"""
sns.set_style("whitegrid")
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5})
sns.set_palette(sns.color_palette("gist_rainbow", 11),11,0.9)
results_data=DataFrame(weights,[0])
results_data.plot(kind="bar",title="weights",fontsize=20, ylim=(-15,15))
sns.despine()
## plt.show()
## name=getFileName(name)
plt.savefig(exp["exp_dir"]+name+'.png', bbox_inches='tight')
plt.close()
def drawScoreProgression(exp,scores,name):
"""
"""
sns.set_style("whitegrid")
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5})
sns.set_palette(sns.color_palette("gist_rainbow", 11),11,0.9)
print(scores)
results_data=DataFrame(scores)
results_data.plot(kind="line",title="scores",fontsize=20, xlim=(0,8))
sns.despine()
## plt.show()
plt.savefig(exp["exp_dir"]+str(name)+'.png', bbox_inches='tight')
plt.close()
def main():
## filenames=["weights_OWN_max2_inc2.00_ini1_FA_bulkScorer.csv","weights_OWN_max2_inc1.00_ini1_BooleanQuery.csv"]
## filenames=["weights_OWN_['1', '3', '5']_FA_bulkScorer_first1000.csv","weights_OWN_['1', '3', '5']_FA_bulkScorer_second1000.csv"]
## filenames=["weights_CTR_max2_inc1.00_ini1.csv"]
## filenames=["weights_BKG_max2_inc1.00_ini1.csv"]
## filenames=["weights_OTH_['1', '3', '5']_FA_BulkScorer_first650.csv","weights_OTH_['1', '3', '5']_FA_BulkScorer_second650.csv"]
## filenames=["weights_OWN_['1', '3', '5']test_optimized.csv","weights_OWN_['1', '3', '5']_FA_bulkScorer_second1000.csv"]
## filenames=["weights_OWN_['1', '3', '5']test_optimized_defaultsim_first1000.csv","weights_OWN_['1', '3', '5']test_optimized_defaultsim_second1000.csv"]
## filenames=["weights_OWN_['1', '3', '5']_FA_bulkScorer_first1000.csv","weights_OWN_['1', '3', '5', '7', '9']test_optimized_fa_first1000.csv"]
filenames=["weights_OWN_['1', '3', '5', '7', '9']test_optimized_fa_first1000.csv","weights_OWN_['1', '3', '5', '7', '9']test_optimized_fa_second1000.csv"]
filenames=["weights_OWN_['1', '3', '5', '7', '9']test_optimized_fa_second1000.csv","weights_OWN_[1, 3, 5, 7]test_optimized_fa_third1000.csv"]
filenames=["weights_Hyp_[1, 5]_s1.csv","weights_Mot_[1, 5]_s1.csv"]
filenames=["weights_Bac_[1, 5]_s1.csv","weights_Goa_[1, 5]_s1.csv"]
filenames=[r"C:\NLP\PhD\bob\experiments\w20_ilcpar_csc_fa_w0135\weights_Bac_[1, 5]_s2.csv"]
## compareResultsBetweenSimilarities(filenames)
## compareNewSimilaritiesGraph(filenames[:1])
## compareResultsBetweenSimilarities("weights_OWN_max2_inc2.00_ini1_FA_bulkScorer.csv")
## compareResultsBetweenSimilarities("weights_OWN_max2_inc1.00_ini1_BooleanQuery.csv")
## compareResultsBetweenSimilarities("weights_AIM_max3_inc1.00_ini1.csv")
## compareResultsBetweenSimilarities("weights_BKG_max2_inc1.00_ini1.csv")
## compareResultsBetweenSimilarities("weights_CTR_max2_inc1.00_ini1.csv")
## compareResultsBetweenSimilarities("weights_BAS_max2_inc1.00_ini1.csv")
## computeOverlap("overlap_bulkScorer_explain.csv", overlap_in="precision_score")
## makeAllGraphsForExperiment(r"C:\NLP\PhD\bob\experiments\w20_csc_csc_fa_w0135")
## makeAllGraphsForExperiment(r"C:\NLP\PhD\bob\experiments\w20_ilcpar_csc_fa_w0135")
## drawWeights("",{"AIM":9,"BAS":5,"BKG":1,"CTR":9,"OTH":0,"OWN":1,"TXT":1})
drawScoreProgression({"exp_dir":r"C:\NLP\PhD\bob\experiments\w20_az_az_fa\\"},[1,2,3,4,4,4,5,6],0)
pass
if __name__ == '__main__':
main()
|
gpl-3.0
|
mumuwoyou/vnpy
|
vn.datayes/api.py
|
10
|
45360
|
#encoding: UTF-8
import os
import json
import time
import requests
import pymongo
import pandas as pd
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread, Timer
from pymongo import MongoClient
from requests.exceptions import ConnectionError
from errors import (VNPAST_ConfigError, VNPAST_RequestError,
VNPAST_DataConstructorError)
class Config(object):
"""
Json-like config object.
The Config contains all kinds of settings and user info that
could be useful in the implementation of the Api wrapper.
privates
--------
* head: string; the name of config file.
* token: string; user's token.
* body: dictionary; the main content of config.
- domain: string, api domain.
- ssl: boolean, specifies http or https usage.
- version: string, version of the api. Currently 'v1'.
- header: dictionary; the request header which contains
authorization information.
"""
head = 'my config'
toke_ = '44ebc0f058981f85382595f9f15f967' + \
'0c7eaf2695de30dd752e8f33e9022baa0'
token = '575593eb7696aec7339224c0fac2313780d8645f68b77369dcb35f8bcb419a0b'
body = {
'ssl': False,
'domain': 'api.wmcloud.com/data',
'version': 'v1',
'header': {
'Connection' : 'keep-alive',
'Authorization': 'Bearer ' + token
}
}
def __init__(self, head=None, token=None, body=None):
"""
Reloaded constructor.
parameters
----------
* head: string; the name of config file. Default is None.
* token: string; user's token.
* body: dictionary; the main content of config
"""
if head:
self.head = head
if token:
self.token = token
if body:
self.body = body
def view(self):
""" Prettify printing method. """
config_view = {
'config_head' : self.head,
'config_body' : self.body,
'user_token' : self.token
}
print json.dumps(config_view,
indent=4,
sort_keys=True)
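# --- Editorial usage sketch (not part of the original module) -----------------
# Minimal, assumed pattern for supplying your own token: build a body dict with
# the same keys as the class-level default and pass it to the constructor.
# `my_token` is a placeholder, not a real credential.
def _config_usage_sketch(my_token):
    body = {
        'ssl': False,
        'domain': 'api.wmcloud.com/data',
        'version': 'v1',
        'header': {
            'Connection': 'keep-alive',
            'Authorization': 'Bearer ' + my_token
        }
    }
    cfg = Config(head='my config', token=my_token, body=body)
    cfg.view()  # pretty-prints the configuration as JSON
    return cfg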
#----------------------------------------------------------------------
# Data containers.
class BaseDataContainer(object):
"""
Basic data container. The fundamental of all other data
container objects defined within this module.
privates
--------
* head: string; the head(type) of data container.
* body: dictionary; data content. Among all sub-classes that inherit
BaseDataContainer, type(body) varies according to the financial meaning
that the child data container stands for.
- History:
- Bar
"""
head = 'ABSTRACT_DATA'
body = dict()
pass
class History(BaseDataContainer):
"""
Historical data container. The foundation of all other pandas
DataFrame-like two dimensional data containers for this module.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ...],
'retCode': 1,
'retMsg': 'Success'}.
So the body of data is actually in data['data'], which is
our target when constructing the container.
"""
try:
assert 'data' in data
self.body = pd.DataFrame(data['data'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + 'input is not a dataframe.'
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
class Bar(History):
"""
Historical Bar data container. Inherits from History()
DataFrame-like two dimensional data containers for Bar data.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY_BAR'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [{
'exchangeCD': 'XSHG',
'utcOffset': '+08:00',
'unit': 1,
'currencyCD': 'CNY',
'barBodys': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ... ],
'ticker': '000001',
'shortNM': u'\u4e0a\u8bc1\u6307\u6570'
}, ...(other tickers) ],
'retCode': 1,
'retMsg': 'Success'}.
When requesting 1 ticker, the json['data'] layer has only one element;
the layout appears intended for collecting data for multiple tickers,
which is not currently supported.
So we want resp.json()['data'][0]['barBodys'] for Bar data contents,
and that is what we go into when constructing Bar.
"""
try:
assert 'data' in data
assert 'barBodys' in data['data'][0]
self.body = pd.DataFrame(data['data'][0]['barBodys'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + 'input is not a dataframe.'
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
#----------------------------------------------------------------------
# Datayes Api class
class PyApi(object):
"""
Python based Datayes Api object.
PyApi should be initialized with a Config json. The config must be complete,
in that once constructed, the private variables like request headers,
tokens, etc. become constant values (inherited from config), and will be
consistently referred to whenever requests are made.
privates
--------
* _config: Config object; a container of all useful settings when making
requests.
* _ssl, _domain, _domain_stream, _version, _header, _account_id:
boolean, string, string, string, dictionary, integer;
just private references to the items in Config. See the docs of Config().
* _session: requests.session object.
examples
--------
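A minimal usage sketch (editorial addition, assumed rather than taken from
the original author): build the api from the default Config and request
daily bars for one ticker. A valid token is required for real requests.
>>> api = PyApi(Config())
>>> bars = api.get_equity_D1(start='20150701', end='20150730',
...                          ticker='000001')
>>> bars.body.head() # doctest: +SKIP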
"""
_config = Config()
# request stuffs
_ssl = False
_domain = ''
_version = 'v1'
_header = dict()
_token = None
_session = requests.session()
def __init__(self, config):
"""
Constructor.
parameters
----------
* config: Config object; specifies user and connection configs.
"""
if config.body:
try:
self._config = config
self._ssl = config.body['ssl']
self._domain = config.body['domain']
self._version = config.body['version']
self._header = config.body['header']
except KeyError:
msg = '[API]: Unable to configure api; ' + \
'config file is incomplete.'
raise VNPAST_ConfigError(msg)
except Exception,e:
msg = '[API]: Unable to configure api; ' + str(e)
raise VNPAST_ConfigError(msg)
# configure protocol
if self._ssl:
self._domain = 'https://' + self._domain
else:
self._domain = 'http://' + self._domain
def __access(self, url, params, method='GET'):
"""
request specific data from given url with parameters.
parameters
----------
* url: string.
* params: dictionary.
* method: string; 'GET' or 'POST', request method.
"""
try:
assert type(url) == str
assert type(params) == dict
except AssertionError:
raise VNPAST_RequestError('[API]: Invalid url or parameter input.')
if not self._session:
s = requests.session()
else: s = self._session
# prepare and send the request.
try:
req = requests.Request(method,
url = url,
headers = self._header,
params = params)
prepped = s.prepare_request(req) # prepare the request
resp = s.send(prepped, stream=False, verify=True)
if method == 'GET':
assert resp.status_code == 200
elif method == 'POST':
assert resp.status_code == 201
return resp
except AssertionError:
msg = '[API]: Bad request, unexpected response status: ' + \
str(resp.status_code)
raise VNPAST_RequestError(msg)
pass
except Exception,e:
msg = '[API]: Bad request.' + str(e)
raise VNPAST_RequestError(msg)
#----------------------------------------------------------------------
# directly get methods - Market data
def get_equity_M1_one(self,
start='', end='', secID='000001.XSHG'):
"""
Get 1-minute intraday bar data of one security.
parameters
----------
* start, end: string; Time mark formatted in 'HH:MM'. Specifies the
start/end point of bar. Note that the requested date is the
latest trading day (only one day), and the default start/end time is
'09:30' and min(now, '15:00'). Effective minute bars range from
09:30 - 11:30 in the morning and 13:01 - 15:00 in the afternoon.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
"""
url = '{}/{}/api/market/getBarRTIntraDay.json'.format(
self._domain, self._version)
params = {
'startTime': start,
'endTime': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
print resp.json()
data = Bar(resp.json())
return data
except AssertionError: return 0
def get_equity_M1(self, field='', start='20130701', end='20130730',
secID='000001.XSHG', output='df'):
"""
1-minute bar in a month, currently unavailable.
parameters
----------
* field: string; variables that are to be requested.
* start, end: string; Time mark formatted in 'YYYYMMDD'.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
url = '{}/{}/api/market/getBarHistDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'startDate': start,
'endDate': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = Bar(resp.json())
elif output == 'list':
data = resp.json()['data'][0]['barBodys']
return data
except AssertionError: return 0
def get_equity_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one security.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for securities)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- actPreClosePrice* double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- dealAmount* integer.
- turnoverRate double.
- accumAdjFactor* double.
- negMarketValue* double.
- marketValue* double.
- PE* double.
- PE1* double.
- PB* double.
Field is an optional parameter, default setting returns all fields.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of bar. Start and end are optional parameters. If
start, end and ticker are all specified, default 'one' value will be
abandoned.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange.
* ticker: string; the trading code in the form of '000001'.
* one: string; Date mark formatted in 'YYYYMMDD'.
Specifies one date on which data of all tickers are to be requested.
Note that to get effective json data response, at least one parameter
in {secID, ticker, tradeDate} should be entered.
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktEqud.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
#return resp
except AssertionError: return 0
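    # Illustrative sketch, not part of the original API: the two call
    # patterns described in the get_equity_D1() docstring, written out with
    # placeholder tickers/dates.
    def _example_get_equity_D1_usage(self):
        """A minimal usage sketch for get_equity_D1(); assumes the client
        object has already been constructed and authenticated elsewhere."""
        # Pattern 1: a date range for a single ticker ('one' is ignored).
        ranged = self.get_equity_D1(start='20150101', end='20150131',
                                    ticker='000001', output='df')
        # Pattern 2: one trading day for all tickers via 'one'.
        snapshot = self.get_equity_D1(one=20150513, output='list')
        return ranged, snapshot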
def get_block_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_repo_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_bond_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one bond instrument.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for bonds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- turnoverRate double.
- dealAmount* integer.
- accrInterest* double.
- YTM(yieldToMaturity)* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktBondd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one future contract.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for future contracts)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- contractObject* string.
- contractMark* string.
- preSettlePrice* double.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol integer.
- turnoverValue integer.
- openInt* integer.
- CHG* double.
- CHG1* double.
- CHGPct* double.
- mainCon* integer (0/1 flag).
- smainCon* integer (0/1 flag).
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFutd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_main_D1(self, field='', start='', end='', mark='',
obj='', main=1, one=20150513):
"""
"""
pass
def get_fund_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one mutual fund.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for funds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
- discount* double.
- discountRatio* double.
- circulationShares* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFundd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_index_D1(self, field='', start='', end='', indexID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one stock index.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for indices)
- indexID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- porgFullName* string.
- exchangeCD string.
- preCloseIndex double.
- openIndex double.
- highestIndex double.
- lowestIndex double.
- closeIndex double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktIdxd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'indexID': indexID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_option_D1(self, field='', start='', end='', secID='',
optID='' ,ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one option contact.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for options)
- secID string.
- optID* string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol double.
- turnoverValue double.
- openInt* integer.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktOptd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'optID': optID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_stockFactor_D1(self, field='', secID='',
ticker='000001', start=20130701, end=20130801):
"""
Get 1-day interday factor data for stocks.
parameters
----------
* field: string; variables that are to be requested.
Field is an optional parameter, default setting returns all fields.
        * start, end, secID, ticker
            string, string, string, string
            Same as above, reference: get_equity_D1().
"""
url = '{}/{}/api/market/getStockFactorsDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
#----------------------------------------------------------------------
# directly get methods - Fundamental Data
def get_balanceSheet(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtBS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_balanceSheet_bnk(self):
"""
"""
pass
def get_balanceSheet_sec(self):
"""
"""
pass
def get_balanceSheet_ins(self):
"""
"""
pass
def get_balanceSheet_ind(self):
"""
"""
pass
def get_cashFlow(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtCF.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_cashFlow_bnk(self):
"""
"""
pass
def get_cashFlow_sec(self):
"""
"""
pass
def get_cashFlow_ins(self):
"""
"""
pass
def get_cashFlow_ind(self):
"""
"""
pass
def get_incomeStatement(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtIS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_incomeStatement_bnk(self):
"""
"""
pass
def get_incomeStatement_sec(self):
"""
"""
pass
def get_incomeStatement_ins(self):
"""
"""
pass
def get_incomeStatement_ind(self):
"""
"""
pass
#----------------------------------------------------------------------
# multi-threading download for database storage.
def __drudgery(self, id, db, indexType,
start, end, tasks, target):
"""
basic drudgery function.
        This method loops over a list of tasks (tickers) and gets data using
        the target api.get_# method for each of those tickers.
        A new field, 'date' or 'dateTime' (for intraday), will be automatically
        added to every json-like document; it holds a datetime.datetime()
        formatted date(time) mark. With the MongoDB setting used in this
        module, this field should be the unique index for all collections.
        By programmatically creating and assigning tasks to drudgery
        functions, multi-threaded download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
        * indexType: string(enum): 'date' or 'datetime'; specifies how
            the collection index is formatted.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
        * target: method; the api.get_# method that is to be called by
            the drudgery function.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
if indexType == 'date':
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
elif indexType == 'datetime':
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
else:
raise ValueError
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = target(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
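    # Illustrative sketch, not part of the original module: a stripped-down,
    # single-threaded version of the loop that __drudgery() runs, kept only
    # to clarify the date tagging and the MongoDB insertion. `fetch` stands
    # for any of the get_*_D1 methods above; all names here are hypothetical.
    def _drudgery_sketch(self, db, fetch, tickers, start, end):
        todt = lambda s: datetime.strptime(s, '%Y-%m-%d')
        for ticker in tickers:
            docs = fetch(start=start, end=end, ticker=ticker, output='list')
            if not docs:
                continue                            # skip empty responses
            for d in docs:
                d['date'] = todt(d['tradeDate'])    # becomes the unique index
            db[ticker].insert_many(docs)            # one collection per ticker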
def get_equity_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_equity_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_equity_D1)
def get_future_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_future_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_future_D1)
def get_index_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_index_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_index_D1)
def get_bond_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_bond_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_bond_D1)
def get_fund_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_fund_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_fund_D1)
def get_option_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_option_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_option_D1)
#----------------------------------------------------------------------
def __overlord(self, db, start, end, dName,
target1, target2, sessionNum):
"""
Basic controller of multithreading request.
        Generates a list of all tickers, creates threads and distributes
tasks to individual #_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
        * dName: string; the path of the file where all tickers' information
            is stored.
        * target1: method; the api method that the overlord calls
            to get the task list.
* target2: method; the corresponding drudgery function.
        * sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
if os.path.isfile(dName):
            # if the ticker file exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = target1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = target2,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
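    # Worked example (a sketch, not executed anywhere) of the chunking that
    # __overlord() performs: with 3000 tickers and sessionNum = 30,
    #     chunkSize = 3000 / 30 = 100
    #     taskLists = [allTickers[k:k+100] for k in range(0, 3000, 100)]
    # which yields 30 task lists of 100 tickers, one per worker thread.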
def get_equity_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get equity D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/equTicker.json',
target1 = self.get_equity_D1,
target2 = self.get_equity_D1_drudgery,
sessionNum = sessionNum)
def get_future_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get future D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/futTicker.json',
target1 = self.get_future_D1,
target2 = self.get_future_D1_drudgery,
sessionNum = sessionNum)
def get_index_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get index D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/idxTicker.json',
target1 = self.get_index_D1,
target2 = self.get_index_D1_drudgery,
sessionNum = sessionNum)
def get_bond_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get bond D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/bndTicker.json',
target1 = self.get_bond_D1,
target2 = self.get_bond_D1_drudgery,
sessionNum = sessionNum)
def get_fund_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get fund D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/fudTicker.json',
target1 = self.get_fund_D1,
target2 = self.get_fund_D1_drudgery,
sessionNum = sessionNum)
def get_option_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get option D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/optTicker.json',
target1 = self.get_option_D1,
target2 = self.get_option_D1_drudgery,
sessionNum = sessionNum)
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
        Generates a list of all tickers, creates threads and distributes
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
        * sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
            # if the ticker file exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
# to be deprecated
def get_equity_D1_drudgery_(self, id, db,
start, end, tasks=[]):
"""
Drudgery function of getting equity_D1 bars.
        This method loops over a list of tasks (tickers) and gets D1 bars
        for all these tickers. A new field, 'date', will be automatically
        added to every json-like document; it holds a datetime.datetime()
        formatted date mark. With the default MongoDB setting used in this
        module, this field should be the unique index for all collections.
        By programmatically creating and assigning tasks to drudgery
        functions, multi-threaded download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = self.get_equity_D1(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
                # If the connection chokes, stand by for 1 sec and invoke again.
time.sleep(1)
self.get_equity_D1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
        Generates a list of all tickers, creates threads and distributes
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
        * sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
            # if the ticker file exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
def get_equity_M1_drudgery(self, id, db,
start, end, tasks=[]):
"""
        Drudgery function of getting equity_M1 bars.
        This method loops over a list of tasks (tickers) and gets M1 bars
        for all these tickers. A new field, 'dateTime', combining a Y-m-d
        formatted date part and an H:M time part, will be automatically added
        to every json-like document. It is a datetime.datetime() timestamp
        object. In this module, this field should be the unique index for all
        collections.
        By programmatically creating and assigning tasks to drudgery
        functions, multi-threaded download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
            start/end point of collections of bars. Note that to ensure the
            success of every request, the range between start and end should be
            no more than one month.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
k, n = 1, len(tasks)
for secID in tasks:
try:
data = self.get_equity_M1(start = start,
end = end,
secID = secID,
output = 'list')
                assert len(data) >= 1
                map(update_dt, data) # add datetime feature to docs.
coll = db[secID]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
                # If the connection chokes, stand by for 1 sec and invoke again.
time.sleep(1)
                self.get_equity_M1_drudgery(
                    id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_M1_interMonth(self, db, id,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
tasks=[]):
"""
Mid-level wrapper of get equity M1 method.
Get 1-minute bar between specified start year and ending year for
more than one tickers in tasks list.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* id: integer; the ID of wrapper session.
        * startYr, endYr: integer; the start and ending year between which
            1-minute bar data is fetched month by month using the
            get_equity_M1_drudgery() function.
            Default values are two years ago and the current year.
            The complete time range will be sub-divided into months, and a
            thread is deployed for each of these months.
- example
-------
        Suppose .now() is August 15th 2015. (20150815)
startYr, endYr = 2014, 2015.
then two list of strings will be generated:
ymdStringStart = ['20140102','20140202', ... '20150802']
ymdStringEnd = ['20140101','20140201', ... '20150801']
the sub-timeRanges passed to drudgeries will be:
(start, end): (20140102, 20140201), (20140202, 20140301),
..., (20150702, 20150801).
So the actual time range is 20140102 - 20150801.
        * tasks: list of strings; the security IDs that the underlying
            get_equity_M1_drudgery() calls loop over.
"""
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'02' for k in range(1,13)]
ymdStringStart = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStringEnd = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
k = 0
for t in range(len(ymdStringEnd)-1):
start = ymdStringStart[t]
end = ymdStringEnd[t+1]
subID = str(id) + '_' + str(k)
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (subID, db, start, end, tasks))
thrd.start()
k += 1
def get_equity_M1_all(self, db,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
splitNum=10):
"""
"""
"""
# initialize task list.
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
exchangeCDs = list(data.body['exchangeCD'])
allSecIds = [allTickers[k]+'.'+exchangeCDs[k] for k in range(
len(allTickers))]
chunkSize = len(allSecIds)/splitNum
taskLists = [allSecIds[k:k+chunkSize] for k in range(
0, len(allSecIds), chunkSize)]
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStrings = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
print taskLists[0]
print ymdStrings
k = 0
for t in range(len(ymdStrings)-1):
start = ymdStrings[t]
end = ymdStrings[t+1]
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (k, db, start, end, taskLists[0]))
thrd.start()
k += 1
return 1
"""
pass
|
mit
|
jazcollins/models
|
cognitive_mapping_and_planning/src/map_utils.py
|
9
|
9602
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various function to compute the ground truth map for training etc.
"""
import copy
import logging
import skimage.morphology
import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
import PIL
import src.utils as utils
import cv2
def _get_xy_bounding_box(vertex, padding):
"""Returns the xy bounding box of the environment."""
min_ = np.floor(np.min(vertex[:, :2], axis=0) - padding).astype(np.int)
max_ = np.ceil(np.max(vertex[:, :2], axis=0) + padding).astype(np.int)
return min_, max_
def _project_to_map(map, vertex, wt=None, ignore_points_outside_map=False):
"""Projects points to map, returns how many points are present at each
location."""
num_points = np.zeros((map.size[1], map.size[0]))
vertex_ = vertex[:, :2] - map.origin
vertex_ = np.round(vertex_ / map.resolution).astype(np.int)
if ignore_points_outside_map:
good_ind = np.all(np.array([vertex_[:,1] >= 0, vertex_[:,1] < map.size[1],
vertex_[:,0] >= 0, vertex_[:,0] < map.size[0]]),
axis=0)
vertex_ = vertex_[good_ind, :]
if wt is not None:
wt = wt[good_ind, :]
if wt is None:
np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), 1)
else:
assert(wt.shape[0] == vertex.shape[0]), \
'number of weights should be same as vertices.'
np.add.at(num_points, (vertex_[:, 1], vertex_[:, 0]), wt)
return num_points
def make_map(padding, resolution, vertex=None, sc=1.):
"""Returns a map structure."""
min_, max_ = _get_xy_bounding_box(vertex*sc, padding=padding)
sz = np.ceil((max_ - min_ + 1) / resolution).astype(np.int32)
max_ = min_ + sz * resolution - 1
map = utils.Foo(origin=min_, size=sz, max=max_, resolution=resolution,
padding=padding)
return map
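# Illustrative sketch, not part of the original file: building a tiny map with
# make_map() and splatting a few points into it with _project_to_map(). The
# values are arbitrary and only meant to show the expected shapes and usage.
def _example_make_map_and_project():
  pts = np.array([[0., 0., 0.], [1., 0., 0.], [1., 2., 0.]])
  m = make_map(padding=1., resolution=0.5, vertex=pts, sc=1.)
  counts = _project_to_map(m, pts)  # histogram of shape (size[1], size[0])
  return m, counts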
def _fill_holes(img, thresh):
"""Fills holes less than thresh area (assumes 4 connectivity when computing
  hole area)."""
l, n = scipy.ndimage.label(np.logical_not(img))
img_ = img == True
cnts = np.bincount(l.reshape(-1))
for i, cnt in enumerate(cnts):
if cnt < thresh:
l[l == i] = -1
img_[l == -1] = True
return img_
def compute_traversibility(map, robot_base, robot_height, robot_radius,
valid_min, valid_max, num_point_threshold, shapess,
sc=100., n_samples_per_face=200):
"""Returns a bit map with pixels that are traversible or not as long as the
robot center is inside this volume we are good colisions can be detected by
doing a line search on things, or walking from current location to final
location in the bitmap, or doing bwlabel on the traversibility map."""
tt = utils.Timer()
tt.tic()
num_obstcale_points = np.zeros((map.size[1], map.size[0]))
num_points = np.zeros((map.size[1], map.size[0]))
for i, shapes in enumerate(shapess):
for j in range(shapes.get_number_of_meshes()):
p, face_areas, face_idx = shapes.sample_points_on_face_of_shape(
j, n_samples_per_face, sc)
wt = face_areas[face_idx]/n_samples_per_face
ind = np.all(np.concatenate(
(p[:, [2]] > robot_base,
p[:, [2]] < robot_base + robot_height), axis=1),axis=1)
num_obstcale_points += _project_to_map(map, p[ind, :], wt[ind])
ind = np.all(np.concatenate(
(p[:, [2]] > valid_min,
p[:, [2]] < valid_max), axis=1),axis=1)
num_points += _project_to_map(map, p[ind, :], wt[ind])
selem = skimage.morphology.disk(robot_radius / map.resolution)
obstacle_free = skimage.morphology.binary_dilation(
_fill_holes(num_obstcale_points > num_point_threshold, 20), selem) != True
valid_space = _fill_holes(num_points > num_point_threshold, 20)
traversible = np.all(np.concatenate((obstacle_free[...,np.newaxis],
valid_space[...,np.newaxis]), axis=2),
axis=2)
# plt.imshow(np.concatenate((obstacle_free, valid_space, traversible), axis=1))
# plt.show()
map_out = copy.deepcopy(map)
map_out.num_obstcale_points = num_obstcale_points
map_out.num_points = num_points
map_out.traversible = traversible
map_out.obstacle_free = obstacle_free
map_out.valid_space = valid_space
tt.toc(log_at=1, log_str='src.map_utils.compute_traversibility: ')
return map_out
def resize_maps(map, map_scales, resize_method):
scaled_maps = []
for i, sc in enumerate(map_scales):
if resize_method == 'antialiasing':
# Resize using open cv so that we can compute the size.
# Use PIL resize to use anti aliasing feature.
map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
w = map_.shape[1]; h = map_.shape[0]
map_img = PIL.Image.fromarray((map*255).astype(np.uint8))
map__img = map_img.resize((w,h), PIL.Image.ANTIALIAS)
map_ = np.asarray(map__img).astype(np.float32)
map_ = map_/255.
map_ = np.minimum(map_, 1.0)
map_ = np.maximum(map_, 0.0)
elif resize_method == 'linear_noantialiasing':
map_ = cv2.resize(map*1, None, None, fx=sc, fy=sc, interpolation=cv2.INTER_LINEAR)
else:
logging.error('Unknown resizing method')
scaled_maps.append(map_)
return scaled_maps
def pick_largest_cc(traversible):
out = scipy.ndimage.label(traversible)[0]
cnt = np.bincount(out.reshape(-1))[1:]
return out == np.argmax(cnt) + 1
def get_graph_origin_loc(rng, traversible):
"""Erode the traversibility mask so that we get points in the bulk of the
graph, and not end up with a situation where the graph is localized in the
corner of a cramped room. Output Locs is in the coordinate frame of the
map."""
aa = pick_largest_cc(skimage.morphology.binary_erosion(traversible == True,
selem=np.ones((15,15))))
y, x = np.where(aa > 0)
ind = rng.choice(y.size)
locs = np.array([x[ind], y[ind]])
locs = locs + rng.rand(*(locs.shape)) - 0.5
return locs
def generate_egocentric_maps(scaled_maps, map_scales, map_crop_sizes, loc,
x_axis, y_axis, theta):
maps = []
for i, (map_, sc, map_crop_size) in enumerate(zip(scaled_maps, map_scales, map_crop_sizes)):
maps_i = np.array(get_map_to_predict(loc*sc, x_axis, y_axis, map_,
map_crop_size,
interpolation=cv2.INTER_LINEAR)[0])
maps_i[np.isnan(maps_i)] = 0
maps.append(maps_i)
return maps
def generate_goal_images(map_scales, map_crop_sizes, n_ori, goal_dist,
goal_theta, rel_goal_orientation):
goal_dist = goal_dist[:,0]
goal_theta = goal_theta[:,0]
rel_goal_orientation = rel_goal_orientation[:,0]
goals = [];
# Generate the map images.
for i, (sc, map_crop_size) in enumerate(zip(map_scales, map_crop_sizes)):
goal_i = np.zeros((goal_dist.shape[0], map_crop_size, map_crop_size, n_ori),
dtype=np.float32)
x = goal_dist*np.cos(goal_theta)*sc + (map_crop_size-1.)/2.
y = goal_dist*np.sin(goal_theta)*sc + (map_crop_size-1.)/2.
for j in range(goal_dist.shape[0]):
gc = rel_goal_orientation[j]
x0 = np.floor(x[j]).astype(np.int32); x1 = x0 + 1;
y0 = np.floor(y[j]).astype(np.int32); y1 = y0 + 1;
if x0 >= 0 and x0 <= map_crop_size-1:
if y0 >= 0 and y0 <= map_crop_size-1:
goal_i[j, y0, x0, gc] = (x1-x[j])*(y1-y[j])
if y1 >= 0 and y1 <= map_crop_size-1:
goal_i[j, y1, x0, gc] = (x1-x[j])*(y[j]-y0)
if x1 >= 0 and x1 <= map_crop_size-1:
if y0 >= 0 and y0 <= map_crop_size-1:
goal_i[j, y0, x1, gc] = (x[j]-x0)*(y1-y[j])
if y1 >= 0 and y1 <= map_crop_size-1:
goal_i[j, y1, x1, gc] = (x[j]-x0)*(y[j]-y0)
goals.append(goal_i)
return goals
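# Illustrative sketch, not part of the original file: the bilinear splat used
# in generate_goal_images() for one continuous (x, y) location. The four
# weights always sum to 1, so the total "mass" of the goal is preserved.
def _example_bilinear_splat(x, y, size):
  img = np.zeros((size, size), dtype=np.float32)
  x0, y0 = int(np.floor(x)), int(np.floor(y))
  x1, y1 = x0 + 1, y0 + 1
  for xi, yi, w in [(x0, y0, (x1 - x) * (y1 - y)),
                    (x0, y1, (x1 - x) * (y - y0)),
                    (x1, y0, (x - x0) * (y1 - y)),
                    (x1, y1, (x - x0) * (y - y0))]:
    if 0 <= xi < size and 0 <= yi < size:
      img[yi, xi] = w
  return img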
def get_map_to_predict(src_locs, src_x_axiss, src_y_axiss, map, map_size,
interpolation=cv2.INTER_LINEAR):
fss = []
valids = []
center = (map_size-1.0)/2.0
dst_theta = np.pi/2.0
dst_loc = np.array([center, center])
dst_x_axis = np.array([np.cos(dst_theta), np.sin(dst_theta)])
dst_y_axis = np.array([np.cos(dst_theta+np.pi/2), np.sin(dst_theta+np.pi/2)])
def compute_points(center, x_axis, y_axis):
points = np.zeros((3,2),dtype=np.float32)
points[0,:] = center
points[1,:] = center + x_axis
points[2,:] = center + y_axis
return points
dst_points = compute_points(dst_loc, dst_x_axis, dst_y_axis)
for i in range(src_locs.shape[0]):
src_loc = src_locs[i,:]
src_x_axis = src_x_axiss[i,:]
src_y_axis = src_y_axiss[i,:]
src_points = compute_points(src_loc, src_x_axis, src_y_axis)
M = cv2.getAffineTransform(src_points, dst_points)
fs = cv2.warpAffine(map, M, (map_size, map_size), None, flags=interpolation,
borderValue=np.NaN)
valid = np.invert(np.isnan(fs))
valids.append(valid)
fss.append(fs)
return fss, valids
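# Illustrative sketch, not part of the original file: get_map_to_predict()
# takes, per sample, a location plus unit x/y axes expressed in map
# coordinates. With the source frame equal to the destination frame (robot at
# the crop centre, axes matching dst_theta = pi/2) the affine warp is
# numerically the identity, so the returned crop is simply the un-warped
# top-left corner of the map. `map_img` is assumed to be a 2D float array at
# least crop_size x crop_size in each dimension.
def _example_egocentric_crop(map_img, crop_size):
  c = (crop_size - 1.) / 2.
  loc = np.array([[c, c]])
  x_axis = np.array([[0., 1.]])
  y_axis = np.array([[-1., 0.]])
  crops, valids = get_map_to_predict(loc, x_axis, y_axis, map_img, crop_size)
  return crops[0], valids[0]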
|
apache-2.0
|
scw/geopandas
|
tests/test_geodataframe.py
|
7
|
19172
|
from __future__ import absolute_import
import json
import os
import tempfile
import shutil
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from shapely.geometry import Point, Polygon
import fiona
from geopandas import GeoDataFrame, read_file, GeoSeries
from .util import unittest, download_nybb, assert_geoseries_equal, connect, \
create_db, validate_boro_df, PANDAS_NEW_SQL_API
class TestDataFrame(unittest.TestCase):
def setUp(self):
N = 10
nybb_filename = download_nybb()
self.df = read_file('/nybb_14a_av/nybb.shp', vfs='zip://' + nybb_filename)
with fiona.open('/nybb_14a_av/nybb.shp', vfs='zip://' + nybb_filename) as f:
self.schema = f.schema
self.tempdir = tempfile.mkdtemp()
self.boros = self.df['BoroName']
self.crs = {'init': 'epsg:4326'}
self.df2 = GeoDataFrame([
{'geometry': Point(x, y), 'value1': x + y, 'value2': x * y}
for x, y in zip(range(N), range(N))], crs=self.crs)
self.df3 = read_file('examples/null_geom.geojson')
self.line_paths = self.df3['Name']
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_df_init(self):
self.assertTrue(type(self.df2) is GeoDataFrame)
self.assertTrue(self.df2.crs == self.crs)
def test_different_geo_colname(self):
data = {"A": range(5), "B": range(-5, 0),
"location": [Point(x, y) for x, y in zip(range(5), range(5))]}
df = GeoDataFrame(data, crs=self.crs, geometry='location')
locs = GeoSeries(data['location'], crs=self.crs)
assert_geoseries_equal(df.geometry, locs)
self.assert_('geometry' not in df)
self.assertEqual(df.geometry.name, 'location')
# internal implementation detail
self.assertEqual(df._geometry_column_name, 'location')
geom2 = [Point(x, y) for x, y in zip(range(5, 10), range(5))]
df2 = df.set_geometry(geom2, crs='dummy_crs')
self.assert_('geometry' in df2)
self.assert_('location' in df2)
self.assertEqual(df2.crs, 'dummy_crs')
self.assertEqual(df2.geometry.crs, 'dummy_crs')
# reset so it outputs okay
df2.crs = df.crs
assert_geoseries_equal(df2.geometry, GeoSeries(geom2, crs=df2.crs))
# for right now, non-geometry comes back as series
assert_geoseries_equal(df2['location'], df['location'],
check_series_type=False, check_dtype=False)
def test_geo_getitem(self):
data = {"A": range(5), "B": range(-5, 0),
"location": [Point(x, y) for x, y in zip(range(5), range(5))]}
df = GeoDataFrame(data, crs=self.crs, geometry='location')
self.assert_(isinstance(df.geometry, GeoSeries))
df['geometry'] = df["A"]
self.assert_(isinstance(df.geometry, GeoSeries))
self.assertEqual(df.geometry[0], data['location'][0])
# good if this changed in the future
self.assert_(not isinstance(df['geometry'], GeoSeries))
self.assert_(isinstance(df['location'], GeoSeries))
data["geometry"] = [Point(x + 1, y - 1) for x, y in zip(range(5), range(5))]
df = GeoDataFrame(data, crs=self.crs)
self.assert_(isinstance(df.geometry, GeoSeries))
self.assert_(isinstance(df['geometry'], GeoSeries))
# good if this changed in the future
self.assert_(not isinstance(df['location'], GeoSeries))
def test_geometry_property(self):
assert_geoseries_equal(self.df.geometry, self.df['geometry'],
check_dtype=True, check_index_type=True)
df = self.df.copy()
new_geom = [Point(x, y) for x, y in zip(range(len(self.df)),
range(len(self.df)))]
df.geometry = new_geom
new_geom = GeoSeries(new_geom, index=df.index, crs=df.crs)
assert_geoseries_equal(df.geometry, new_geom)
assert_geoseries_equal(df['geometry'], new_geom)
# new crs
gs = GeoSeries(new_geom, crs="epsg:26018")
df.geometry = gs
self.assertEqual(df.crs, "epsg:26018")
def test_geometry_property_errors(self):
with self.assertRaises(AttributeError):
df = self.df.copy()
del df['geometry']
df.geometry
# list-like error
with self.assertRaises(ValueError):
df = self.df2.copy()
df.geometry = 'value1'
# list-like error
with self.assertRaises(ValueError):
df = self.df.copy()
df.geometry = 'apple'
# non-geometry error
with self.assertRaises(TypeError):
df = self.df.copy()
df.geometry = list(range(df.shape[0]))
with self.assertRaises(KeyError):
df = self.df.copy()
del df['geometry']
df['geometry']
# ndim error
with self.assertRaises(ValueError):
df = self.df.copy()
df.geometry = df
def test_set_geometry(self):
geom = GeoSeries([Point(x, y) for x, y in zip(range(5), range(5))])
original_geom = self.df.geometry
df2 = self.df.set_geometry(geom)
self.assert_(self.df is not df2)
assert_geoseries_equal(df2.geometry, geom)
assert_geoseries_equal(self.df.geometry, original_geom)
assert_geoseries_equal(self.df['geometry'], self.df.geometry)
# unknown column
with self.assertRaises(ValueError):
self.df.set_geometry('nonexistent-column')
# ndim error
with self.assertRaises(ValueError):
self.df.set_geometry(self.df)
# new crs - setting should default to GeoSeries' crs
gs = GeoSeries(geom, crs="epsg:26018")
new_df = self.df.set_geometry(gs)
self.assertEqual(new_df.crs, "epsg:26018")
# explicit crs overrides self and dataframe
new_df = self.df.set_geometry(gs, crs="epsg:27159")
self.assertEqual(new_df.crs, "epsg:27159")
self.assertEqual(new_df.geometry.crs, "epsg:27159")
# Series should use dataframe's
new_df = self.df.set_geometry(geom.values)
self.assertEqual(new_df.crs, self.df.crs)
self.assertEqual(new_df.geometry.crs, self.df.crs)
def test_set_geometry_col(self):
g = self.df.geometry
g_simplified = g.simplify(100)
self.df['simplified_geometry'] = g_simplified
df2 = self.df.set_geometry('simplified_geometry')
# Drop is false by default
self.assert_('simplified_geometry' in df2)
assert_geoseries_equal(df2.geometry, g_simplified)
# If True, drops column and renames to geometry
df3 = self.df.set_geometry('simplified_geometry', drop=True)
self.assert_('simplified_geometry' not in df3)
assert_geoseries_equal(df3.geometry, g_simplified)
def test_set_geometry_inplace(self):
geom = [Point(x, y) for x, y in zip(range(5), range(5))]
ret = self.df.set_geometry(geom, inplace=True)
self.assert_(ret is None)
geom = GeoSeries(geom, index=self.df.index, crs=self.df.crs)
assert_geoseries_equal(self.df.geometry, geom)
def test_set_geometry_series(self):
# Test when setting geometry with a Series that
# alignment will occur
#
# Reverse the index order
# Set the Series to be Point(i,i) where i is the index
self.df.index = range(len(self.df)-1, -1, -1)
d = {}
for i in range(len(self.df)):
d[i] = Point(i, i)
g = GeoSeries(d)
# At this point, the DataFrame index is [4,3,2,1,0] and the
# GeoSeries index is [0,1,2,3,4]. Make sure set_geometry aligns
# them to match indexes
df = self.df.set_geometry(g)
for i, r in df.iterrows():
self.assertAlmostEqual(i, r['geometry'].x)
self.assertAlmostEqual(i, r['geometry'].y)
def test_to_json(self):
text = self.df.to_json()
data = json.loads(text)
self.assertTrue(data['type'] == 'FeatureCollection')
self.assertTrue(len(data['features']) == 5)
def test_to_json_geom_col(self):
df = self.df.copy()
df['geom'] = df['geometry']
df['geometry'] = np.arange(len(df))
df.set_geometry('geom', inplace=True)
text = df.to_json()
data = json.loads(text)
self.assertTrue(data['type'] == 'FeatureCollection')
self.assertTrue(len(data['features']) == 5)
def test_to_json_na(self):
# Set a value as nan and make sure it's written
self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
text = self.df.to_json()
data = json.loads(text)
self.assertTrue(len(data['features']) == 5)
for f in data['features']:
props = f['properties']
self.assertEqual(len(props), 4)
if props['BoroName'] == 'Queens':
self.assertTrue(props['Shape_Area'] is None)
def test_to_json_bad_na(self):
# Check that a bad na argument raises error
with self.assertRaises(ValueError):
text = self.df.to_json(na='garbage')
def test_to_json_dropna(self):
self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
self.df.loc[self.df['BoroName']=='Bronx', 'Shape_Leng'] = np.nan
text = self.df.to_json(na='drop')
data = json.loads(text)
self.assertEqual(len(data['features']), 5)
for f in data['features']:
props = f['properties']
if props['BoroName'] == 'Queens':
self.assertEqual(len(props), 3)
self.assertTrue('Shape_Area' not in props)
# Just make sure setting it to nan in a different row
# doesn't affect this one
self.assertTrue('Shape_Leng' in props)
elif props['BoroName'] == 'Bronx':
self.assertEqual(len(props), 3)
self.assertTrue('Shape_Leng' not in props)
self.assertTrue('Shape_Area' in props)
else:
self.assertEqual(len(props), 4)
def test_to_json_keepna(self):
self.df.loc[self.df['BoroName']=='Queens', 'Shape_Area'] = np.nan
self.df.loc[self.df['BoroName']=='Bronx', 'Shape_Leng'] = np.nan
text = self.df.to_json(na='keep')
data = json.loads(text)
self.assertEqual(len(data['features']), 5)
for f in data['features']:
props = f['properties']
self.assertEqual(len(props), 4)
if props['BoroName'] == 'Queens':
self.assertTrue(np.isnan(props['Shape_Area']))
# Just make sure setting it to nan in a different row
# doesn't affect this one
self.assertTrue('Shape_Leng' in props)
elif props['BoroName'] == 'Bronx':
self.assertTrue(np.isnan(props['Shape_Leng']))
self.assertTrue('Shape_Area' in props)
def test_copy(self):
df2 = self.df.copy()
self.assertTrue(type(df2) is GeoDataFrame)
self.assertEqual(self.df.crs, df2.crs)
def test_to_file(self):
""" Test to_file and from_file """
tempfilename = os.path.join(self.tempdir, 'boros.shp')
self.df.to_file(tempfilename)
# Read layer back in
df = GeoDataFrame.from_file(tempfilename)
self.assertTrue('geometry' in df)
self.assertTrue(len(df) == 5)
self.assertTrue(np.alltrue(df['BoroName'].values == self.boros))
# Write layer with null geometry out to file
tempfilename = os.path.join(self.tempdir, 'null_geom.shp')
self.df3.to_file(tempfilename)
# Read layer back in
df3 = GeoDataFrame.from_file(tempfilename)
self.assertTrue('geometry' in df3)
self.assertTrue(len(df3) == 2)
self.assertTrue(np.alltrue(df3['Name'].values == self.line_paths))
def test_to_file_types(self):
""" Test various integer type columns (GH#93) """
tempfilename = os.path.join(self.tempdir, 'int.shp')
int_types = [np.int, np.int8, np.int16, np.int32, np.int64, np.intp,
np.uint8, np.uint16, np.uint32, np.uint64, np.long]
geometry = self.df2.geometry
data = dict((str(i), np.arange(len(geometry), dtype=dtype))
for i, dtype in enumerate(int_types))
df = GeoDataFrame(data, geometry=geometry)
df.to_file(tempfilename)
def test_mixed_types_to_file(self):
""" Test that mixed geometry types raise error when writing to file """
tempfilename = os.path.join(self.tempdir, 'test.shp')
s = GeoDataFrame({'geometry': [Point(0, 0),
Polygon([(0, 0), (1, 0), (1, 1)])]})
with self.assertRaises(ValueError):
s.to_file(tempfilename)
def test_to_file_schema(self):
"""
Ensure that the file is written according to the schema
if it is specified
"""
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
tempfilename = os.path.join(self.tempdir, 'test.shp')
properties = OrderedDict([
('Shape_Leng', 'float:19.11'),
('BoroName', 'str:40'),
('BoroCode', 'int:10'),
('Shape_Area', 'float:19.11'),
])
schema = {'geometry': 'Polygon', 'properties': properties}
# Take the first 2 features to speed things up a bit
self.df.iloc[:2].to_file(tempfilename, schema=schema)
with fiona.open(tempfilename) as f:
result_schema = f.schema
self.assertEqual(result_schema, schema)
def test_bool_index(self):
# Find boros with 'B' in their name
df = self.df[self.df['BoroName'].str.contains('B')]
self.assertTrue(len(df) == 2)
boros = df['BoroName'].values
self.assertTrue('Brooklyn' in boros)
self.assertTrue('Bronx' in boros)
self.assertTrue(type(df) is GeoDataFrame)
def test_transform(self):
df2 = self.df2.copy()
df2.crs = {'init': 'epsg:26918', 'no_defs': True}
lonlat = df2.to_crs(epsg=4326)
utm = lonlat.to_crs(epsg=26918)
self.assertTrue(all(df2['geometry'].geom_almost_equals(utm['geometry'], decimal=2)))
def test_from_features(self):
nybb_filename = download_nybb()
with fiona.open('/nybb_14a_av/nybb.shp',
vfs='zip://' + nybb_filename) as f:
features = list(f)
crs = f.crs
df = GeoDataFrame.from_features(features, crs=crs)
df.rename(columns=lambda x: x.lower(), inplace=True)
validate_boro_df(self, df)
self.assert_(df.crs == crs)
def test_from_features_unaligned_properties(self):
p1 = Point(1, 1)
f1 = {'type': 'Feature',
'properties': {'a': 0},
'geometry': p1.__geo_interface__}
p2 = Point(2, 2)
f2 = {'type': 'Feature',
'properties': {'b': 1},
'geometry': p2.__geo_interface__}
p3 = Point(3, 3)
f3 = {'type': 'Feature',
'properties': {'a': 2},
'geometry': p3.__geo_interface__}
df = GeoDataFrame.from_features([f1, f2, f3])
result = df[['a', 'b']]
expected = pd.DataFrame.from_dict([{'a': 0, 'b': np.nan},
{'a': np.nan, 'b': 1},
{'a': 2, 'b': np.nan}])
assert_frame_equal(expected, result)
def test_from_postgis_default(self):
con = connect('test_geopandas')
if con is None or not create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = "SELECT * FROM nybb;"
df = GeoDataFrame.from_postgis(sql, con)
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
validate_boro_df(self, df)
def test_from_postgis_custom_geom_col(self):
con = connect('test_geopandas')
if con is None or not create_db(self.df):
raise unittest.case.SkipTest()
try:
sql = """SELECT
borocode, boroname, shape_leng, shape_area,
geom AS __geometry__
FROM nybb;"""
df = GeoDataFrame.from_postgis(sql, con, geom_col='__geometry__')
finally:
if PANDAS_NEW_SQL_API:
# It's not really a connection, it's an engine
con = con.connect()
con.close()
validate_boro_df(self, df)
def test_dataframe_to_geodataframe(self):
df = pd.DataFrame({"A": range(len(self.df)), "location":
list(self.df.geometry)}, index=self.df.index)
gf = df.set_geometry('location', crs=self.df.crs)
self.assertIsInstance(df, pd.DataFrame)
self.assertIsInstance(gf, GeoDataFrame)
assert_geoseries_equal(gf.geometry, self.df.geometry)
self.assertEqual(gf.geometry.name, 'location')
self.assert_('geometry' not in gf)
gf2 = df.set_geometry('location', crs=self.df.crs, drop=True)
self.assertIsInstance(df, pd.DataFrame)
self.assertIsInstance(gf2, GeoDataFrame)
self.assertEqual(gf2.geometry.name, 'geometry')
self.assert_('geometry' in gf2)
self.assert_('location' not in gf2)
self.assert_('location' in df)
# should be a copy
df.ix[0, "A"] = 100
self.assertEqual(gf.ix[0, "A"], 0)
self.assertEqual(gf2.ix[0, "A"], 0)
with self.assertRaises(ValueError):
df.set_geometry('location', inplace=True)
def test_geodataframe_geointerface(self):
self.assertEqual(self.df.__geo_interface__['type'], 'FeatureCollection')
self.assertEqual(len(self.df.__geo_interface__['features']),
self.df.shape[0])
def test_geodataframe_geojson_no_bbox(self):
geo = self.df._to_geo(na="null", show_bbox=False)
self.assertFalse('bbox' in geo.keys())
for feature in geo['features']:
self.assertFalse('bbox' in feature.keys())
def test_geodataframe_geojson_bbox(self):
geo = self.df._to_geo(na="null", show_bbox=True)
self.assertTrue('bbox' in geo.keys())
self.assertEqual(len(geo['bbox']), 4)
self.assertTrue(isinstance(geo['bbox'], tuple))
for feature in geo['features']:
self.assertTrue('bbox' in feature.keys())
def test_pickle(self):
filename = os.path.join(self.tempdir, 'df.pkl')
self.df.to_pickle(filename)
unpickled = pd.read_pickle(filename)
assert_frame_equal(self.df, unpickled)
self.assertEqual(self.df.crs, unpickled.crs)
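# A minimal sketch, not part of the original test suite, of the alignment
# behaviour exercised in test_set_geometry_series(): set_geometry() aligns a
# GeoSeries to the frame's index rather than assigning values positionally.
def _set_geometry_alignment_sketch():
    frame = GeoDataFrame({'A': range(3)}, index=[2, 1, 0])
    pts = GeoSeries({i: Point(i, i) for i in range(3)})
    aligned = frame.set_geometry(pts)
    return aligned  # the row labelled i carries Point(i, i)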
|
bsd-3-clause
|
wohlert/agnosia
|
procedures/gen_img.py
|
2
|
1280
|
import sys
import numpy as np
import pandas as pd
import atone.io as io
from atone.pipeline import Pipeline
from atone.preprocessing import scale, cut, get_magnetometers, min_max, keep_channels
from atone.imaging import spatial_transforms, generate_images
filename = str(sys.argv[1])
data_dir = "data/"
magnetometers = get_magnetometers("./channel_names.npy")
# Load sensor map and create spatial transform
positions = io.load_positions(data_dir + "sensorspace.mat")
positions = positions[magnetometers]
coordinates = spatial_transforms(positions)
# Load subject data and metadata
X, y, names = io.load_subject(data_dir + filename)
sfreq, tmin, _ = io.load_meta(data_dir)
onset = sfreq * abs(tmin)
def erp_topography(input_matrix: np.array):
trials, channels, samples = input_matrix.shape
n170 = int(sfreq*0.170)
output = input_matrix[:, :, n170]
return output.reshape(trials, channels)
# Build the preprocessing pipeline
pipe = Pipeline()
pipe.add(scale)
pipe.add(cut, [onset])
pipe.add(keep_channels, [magnetometers])
pipe.add(erp_topography)
X = pipe.run(X)
X = np.apply_along_axis(min_max, 1, X)
# Generate image files
# Example image file: "images/train_subject1/trial22.1.jpeg"
generate_images(X, coordinates, "bw/", names, resolution=100)
|
apache-2.0
|
pypot/scikit-learn
|
examples/cluster/plot_digits_linkage.py
|
369
|
2959
|
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
|
bsd-3-clause
|
avmarchenko/exatomic
|
exatomic/algorithms/orbital_util.py
|
3
|
13674
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
'''
Molecular Orbital Utilities
##############################
Molecular orbitals are constructed symbolically
then evaluated on a numerical grid.
These are their stories.
'''
from __future__ import division
import six
import numpy as np
import pandas as pd
from numba import jit
from numexpr import evaluate
from IPython.display import display
from ipywidgets import FloatProgress
from exatomic.core.field import AtomicField
from exatomic.base import nbpll
def compare_fields(uni0, uni1, rtol=5e-5, atol=1e-12, signed=True, verbose=True):
"""Compare field values of differenct universes.
It is expected that fields are in the same order.
Args:
uni0 (:class:`exatomic.core.universe.Universe`): first universe
uni1 (:class:`exatomic.core.universe.Universe`): second universe
rtol (float): relative tolerance passed to numpy.isclose
atol (float): absolute tolerance passed to numpy.isclose
signed (bool): opposite signs are counted as different (default True)
verbose (bool): print how close the fields are to each other numerically (default True)
Returns:
fracs (list): list of fractions measuring closeness of fields
"""
fracs, kws = [], {'rtol': rtol, 'atol': atol}
for i, (f0, f1) in enumerate(zip(uni0.field.field_values,
uni1.field.field_values)):
n = np.isclose(f0, f1, **kws).sum()
if not signed: n = max(n, np.isclose(f0, -f1, **kws).sum())
fracs.append(n / f0.shape[0])
if verbose:
fmt = '{{:<{}}}:{{:>9}}'.format(len(str(len(fracs))) + 1)
print(fmt.format(len(fracs), 'Fraction'))
fmt = fmt.replace('9', '9.5f')
for i, f in enumerate(fracs):
print(fmt.format(i, f))
return fracs
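# Illustrative sketch (not part of the original module): comparing the fields
# of two hypothetical universes, e.g. the same calculation run through two
# different codes. `uni_a` and `uni_b` are assumed to be exatomic Universe
# objects that carry their fields in the same order.
def _example_compare_fields(uni_a, uni_b):
    """Return the fraction of matching grid points for each pair of fields."""
    # Ignore the overall sign of the orbitals and loosen the relative tolerance.
    return compare_fields(uni_a, uni_b, rtol=1e-4, signed=False, verbose=False)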
def numerical_grid_from_field_params(fps):
"""Construct numerical grid arrays from field parameters.
Args:
fps (pd.Series): See :meth:`exatomic.algorithms.orbital_util.make_fps`
Returns:
grid (tup): (xs, ys, zs) 1D-arrays
"""
if isinstance(fps, pd.DataFrame):
fps = fps.loc[0]
ox, nx, dx = fps.ox, int(fps.nx), fps.dxi
oy, ny, dy = fps.oy, int(fps.ny), fps.dyj
oz, nz, dz = fps.oz, int(fps.nz), fps.dzk
mx = ox + (nx - 1) * dx
my = oy + (ny - 1) * dy
mz = oz + (nz - 1) * dz
x = np.linspace(ox, mx, nx)
y = np.linspace(oy, my, ny)
z = np.linspace(oz, mz, nz)
return _meshgrid3d(x, y, z)
def make_fps(rmin=None, rmax=None, nr=None, nrfps=1,
xmin=None, xmax=None, nx=None, frame=0,
ymin=None, ymax=None, ny=None, field_type=0,
zmin=None, zmax=None, nz=None, label=0,
ox=None, fx=None, dxi=None, dxj=None, dxk=None,
oy=None, fy=None, dyi=None, dyj=None, dyk=None,
oz=None, fz=None, dzi=None, dzj=None, dzk=None,
fps=None, dv=None):
"""
Generate the necessary field parameters of a numerical grid field
as an exatomic.field.AtomicField.
Args:
nrfps (int): number of field parameters with same dimensions
rmin (float): minimum value in an arbitrary cartesian direction
rmax (float): maximum value in an arbitrary cartesian direction
nr (int): number of grid points between rmin and rmax
xmin (float): minimum in x direction (optional)
xmax (float): maximum in x direction (optional)
ymin (float): minimum in y direction (optional)
ymax (float): maximum in y direction (optional)
zmin (float): minimum in z direction (optional)
zmax (float): maximum in z direction (optional)
nx (int): steps in x direction (optional)
ny (int): steps in y direction (optional)
nz (int): steps in z direction (optional)
ox (float): origin in x direction (optional)
oy (float): origin in y direction (optional)
oz (float): origin in z direction (optional)
dxi (float): x-component of x-vector specifying a voxel
dxj (float): y-component of x-vector specifying a voxel
dxk (float): z-component of x-vector specifying a voxel
dyi (float): x-component of y-vector specifying a voxel
dyj (float): y-component of y-vector specifying a voxel
dyk (float): z-component of y-vector specifying a voxel
dzi (float): x-component of z-vector specifying a voxel
dzj (float): y-component of z-vector specifying a voxel
dzk (float): z-component of z-vector specifying a voxel
label (str): an identifier passed to the widget (optional)
field_type (str): alternative identifier (optional)
Returns:
fps (pd.Series): field parameters
"""
if fps is not None: return pd.concat([fps.loc[0]] * nrfps, axis=1).T
if any((par is None for par in [rmin, rmax, nr])):
if all((par is not None for par in (xmin, xmax, nx,
ymin, ymax, ny,
zmin, zmax, nz))):
pass
elif all((par is None for par in (ox, dxi, dxj, dxk))):
raise Exception("Must supply at least rmin, rmax, nr or field"
" parameters as specified by a cube file.")
d = {}
allcarts = [['x', 0, xmin, xmax, nx, ox, (dxi, dxj, dxk)],
['y', 1, ymin, ymax, ny, oy, (dyi, dyj, dyk)],
['z', 2, zmin, zmax, nz, oz, (dzi, dzj, dzk)]]
for akey, aidx, amin, amax, na, oa, da in allcarts:
if oa is None:
amin = rmin if amin is None else amin
amax = rmax if amax is None else amax
na = nr if na is None else na
else: amin = oa
dw = [0, 0, 0]
if all(i is None for i in da): dw[aidx] = (amax - amin) / na
else: dw = da
d[akey] = [amin, na, dw]
fp = pd.Series({
'dxi': d['x'][2][0], 'dyj': d['y'][2][1], 'dzk': d['z'][2][2],
'dxj': d['x'][2][1], 'dyk': d['y'][2][2], 'dzi': d['z'][2][0],
'dxk': d['x'][2][2], 'dyi': d['y'][2][0], 'dzj': d['z'][2][1],
'ox': d['x'][0], 'oy': d['y'][0], 'oz': d['z'][0], 'frame': frame,
'nx': d['x'][1], 'ny': d['y'][1], 'nz': d['z'][1], 'label': label,
'field_type': field_type
})
return pd.concat([fp] * nrfps, axis=1).T
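# Illustrative sketch (not part of the original module): building a cubic grid
# with make_fps and expanding it into flat coordinate arrays. The bounds and
# resolution below are hypothetical.
def _example_cubic_grid(rmin=-10.0, rmax=10.0, nr=41):
    """Return (xs, ys, zs) arrays for an nr**3 point grid spanning [rmin, rmax]."""
    fps = make_fps(rmin=rmin, rmax=rmax, nr=nr)
    return numerical_grid_from_field_params(fps)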
def _make_field(flds, fps):
"""Return an AtomicField from field arrays and parameters."""
try:
nvec = flds.shape[0]
if len(fps.index) == nvec:
fps.reset_index(drop=True, inplace=True)
return AtomicField(
fps, field_values=[flds[i] for i in range(nvec)])
return AtomicField(
make_fps(nrfps=nvec, fps=fps),
field_values=[flds[i] for i in range(nvec)])
except:
return AtomicField(
make_fps(nrfps=1, **fps),
field_values=[flds])
def _compute_current_density(bvs, gvx, gvy, gvz, cmatr, cmati, occvec, verbose=True):
"""Compute the current density in each cartesian direction."""
nbas, npts = bvs.shape
curx = np.zeros(npts, dtype=np.float64)
cury = np.zeros(npts, dtype=np.float64)
curz = np.zeros(npts, dtype=np.float64)
cval = np.zeros(nbas, dtype=np.float64)
if verbose:
fp = FloatProgress(description='Computing:')
display(fp)
for mu in range(nbas):
if verbose:
fp.value = mu / nbas * 100
crmu = cmatr[mu]
cimu = cmati[mu]
bvmu = bvs[mu]
gvxmu = gvx[mu]
gvymu = gvy[mu]
gvzmu = gvz[mu]
for nu in range(nbas):
crnu = cmatr[nu]
cinu = cmati[nu]
bvnu = bvs[nu]
gvxnu = gvx[nu]
gvynu = gvy[nu]
gvznu = gvz[nu]
cval = evaluate('-0.5 * (occvec * (crmu * cinu - cimu * crnu))', out=cval)
csum = cval.sum()
evaluate('curx + csum * (bvmu * gvxnu - gvxmu * bvnu)', out=curx)
evaluate('cury + csum * (bvmu * gvynu - gvymu * bvnu)', out=cury)
evaluate('curz + csum * (bvmu * gvznu - gvzmu * bvnu)', out=curz)
if verbose:
fp.close()
return curx, cury, curz
def _determine_vector(uni, vector, irrep=None):
"""Find some orbital indices in a universe."""
if irrep is not None: # Symmetry is fun
iorb = uni.orbital.groupby('irrep').get_group(irrep)
if vector is not None: # Check if vectors are in irrep
# Input vectors appropriately indexed by irrep
if all((i in iorb.vector.values for i in vector)):
return np.array(vector)
# Input vectors indexed in terms of total vectors
elif all((i in iorb.index.values for i in vector)):
return iorb.loc[vector]['vector'].values
else:
raise ValueError('One or more specified vectors '
'could not be found in uni.orbital.')
else:
ihomo = iorb[iorb['occupation'] < 1.98]
ihomo = ihomo.vector.values[0]
return np.array(range(max(0, ihomo-5),
min(ihomo + 7, len(iorb.index))))
# If specified, carry on
if isinstance(vector, int): return np.array([vector])
typs = (list, tuple, six.moves.range, np.ndarray)
if isinstance(vector, typs): return np.array(vector)
# Try to find some reasonable default
norb = len(uni.basis_set_order.index)
if vector is None:
if norb < 10:
return np.array(range(norb))
if hasattr(uni, 'orbital'):
homo = uni.orbital.get_orbital().vector
elif hasattr(uni.frame, 'N_e'):
homo = uni.frame['N_e'].values[0]
elif hasattr(uni.atom, 'Zeff'):
homo = uni.atom['Zeff'].sum() // 2
elif hasattr(uni.atom, 'Z'):
homo = uni.atom['Z'].sum() // 2
else:
uni.atom['Z'] = uni.atom['symbol'].map(sym2z)
homo = uni.atom['Z'].sum() // 2
if homo < 5:
return np.array(range(0, homo + 5))
else:
return np.array(range(homo - 5, homo + 7))
else:
raise TypeError('Try specifying vector as a list or int')
def _determine_fps(uni, fps, nvec):
"""Find some numerical grid parameters in a universe."""
if fps is None:
if hasattr(uni, 'field'):
return make_fps(nrfps=nvec, **uni.field.loc[0])
desc = uni.atom.describe()
kwargs = {'xmin': desc['x']['min'] - 5,
'xmax': desc['x']['max'] + 5,
'ymin': desc['y']['min'] - 5,
'ymax': desc['y']['max'] + 5,
'zmin': desc['z']['min'] - 5,
'zmax': desc['z']['max'] + 5,
'nx': 41, 'ny': 41, 'nz': 41,
'nrfps': nvec}
return make_fps(**kwargs)
return make_fps(nrfps=nvec, **fps)
def _check_column(uni, df, key):
"""Sanity checking of columns in a given dataframe in the universe.
Args:
uni (:class:`~exatomic.core.universe.Universe`): a universe
df (str): name of dataframe attribute in the universe
key (str): column name in df
Returns:
key (str) if key in uni.df
"""
if key is None:
if 'momatrix' in df: key = 'coef'
elif 'orbital' in df: key = 'occupation'
else: raise Exception("{} not supported".format(df))
err = '"{}" not in uni.{}.columns'.format
if key not in getattr(uni, df).columns:
raise Exception(err(key, df))
return key
@jit(nopython=True, nogil=True, parallel=nbpll)
def _compute_orbitals_numba(npts, bvs, vecs, cmat):
"""Compute orbitals from numerical basis functions."""
ovs = np.empty((len(vecs), npts), dtype=np.float64)
for i, vec in enumerate(vecs):
ovs[i] = np.dot(cmat[:, vec], bvs)
return ovs
def _compute_orbitals_numpy(npts, bvs, vecs, cmat):
"""Compute orbitals from numerical basis functions."""
ovs = np.empty((len(vecs), npts), dtype=np.float64)
for i, vec in enumerate(vecs):
ovs[i] = np.dot(cmat[:, vec], bvs)
return ovs
@jit(nopython=True, nogil=True, parallel=nbpll)
def _compute_density(ovs, occvec):
"""Sum orbitals multiplied by their occupations."""
norb, npts = ovs.shape
dens = np.empty(npts, dtype=np.float64)
for i in range(norb):
ovs[i] *= ovs[i]
dens = np.dot(occvec, ovs)
return dens
@jit(nopython=True, nogil=True, parallel=nbpll)
def _compute_orb_ang_mom(rx, ry, rz, jx, jy, jz, mxs):
"""Compute the orbital angular momentum in each direction and the sum."""
npts = rx.shape[0]
ang_mom = np.empty((4, npts), dtype=np.float64)
a0 = ry * jz - rz * jy
a1 = rz * jx - rx * jz
a2 = rx * jy - ry * jx
ang_mom[0] = mxs[0,0] * a0 + mxs[1,0] * a1 + mxs[2,0] * a2
ang_mom[1] = mxs[0,1] * a0 + mxs[1,1] * a1 + mxs[2,1] * a2
ang_mom[2] = mxs[0,2] * a0 + mxs[1,2] * a1 + mxs[2,2] * a2
ang_mom[3] = ang_mom[0] + ang_mom[1] + ang_mom[2]
return ang_mom
@jit(nopython=True, nogil=True, parallel=nbpll)
def _meshgrid3d(x, y, z):
"""Compute extended mesh gridded 1D-arrays from 1D-arrays."""
tot = len(x) * len(y) * len(z)
xs = np.empty(tot, dtype=np.float64)
ys = np.empty(tot, dtype=np.float64)
zs = np.empty(tot, dtype=np.float64)
cnt = 0
for i in x:
for j in y:
for k in z:
xs[cnt] = i
ys[cnt] = j
zs[cnt] = k
cnt += 1
return xs, ys, zs
|
apache-2.0
|
AIML/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
378
|
1815
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
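# Illustrative sketch (not part of the original test module): the reference
# implementation above learns a separating hyperplane for +1/-1 labels on
# linearly separable data. The toy blobs below are hypothetical.
def _demo_my_perceptron():
    rng = check_random_state(0)
    X_toy = np.vstack([rng.randn(20, 2) + 3, rng.randn(20, 2) - 3])
    y_toy = np.array([1] * 20 + [-1] * 20)
    clf = MyPerceptron(n_iter=5)
    clf.fit(X_toy, y_toy)
    # On well-separated blobs the training accuracy should be 1.0.
    return np.mean(clf.predict(X_toy) == y_toy)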
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
|
bsd-3-clause
|
szaiser/pandas-qt
|
pandasqt/models/DataSearch.py
|
4
|
7032
|
# -*- coding: utf-8 -*-
from pandasqt.compat import Qt, QtCore, QtGui
import parser
import re
import numpy as np
import pandas as pd
class DataSearch(object):
"""object which provides parsing functionality for a DataFrame.
A `DataSearch` can apply custom filters defined as python expressions
to a `pandas.DataFrame` object. The dataframe will evaluate the expressions
and return a list with index which either match or fail the expression.
Attributes:
name (str): Each `DataSearch` object should have a name. The name could
be used to store different `DataSearch` objects as predefined filters.
"""
def __init__(self, name, filterString='', dataFrame=pd.DataFrame()):
"""Constructs a `DataSearch` object from the given attributes.
Args:
name (str): The name of the filter.
filterString (str, optional): A python expression as string.
Defaults to an empty string.
dataFrame (pandas.DataFrame, optional): The object to filter.
Defaults to an empty `DataFrame`.
"""
self._filterString = filterString
self._dataFrame = dataFrame
self.name = name
def __repr__(self):
string = u"DataSearch({}): {} ({})".format(hex(id(self)), self.name, self._filterString)
string = string.encode("utf-8")
return string
def dataFrame(self):
"""Getter method for the `dataFrame` attribute.
Note:
It's not implemented with python properties to keep Qt conventions.
Returns:
pandas.DataFrame: A `DataFrame` object.
"""
return self._dataFrame
def setDataFrame(self, dataFrame):
"""Updates/sets the dataFrame attribute of this class.
Args:
dataFrame (pandas.DataFrame): The new `dataFrame` object.
"""
self._dataFrame = dataFrame
def filterString(self):
"""Getter method for the `filterString` attribute.
Note:
It's not implemented with python properties to keep Qt conventions.
Returns:
str: the filter/python expression as string.
"""
return self._filterString
def setFilterString(self, filterString):
"""Updates/sets the filterString attribute of this class.
Args:
filterString (str): A python expression as string. All leading and
trailing spaces will be removed.
"""
        ## remove leading whitespaces, they will raise an indentation error
filterString = filterString.strip()
self._filterString = filterString
def search(self):
"""Applies the filter to the stored dataframe.
A safe environment dictionary will be created, which stores all allowed
functions and attributes, which may be used for the filter.
If any object in the given `filterString` could not be found in the
dictionary, the filter does not apply and returns `False`.
Returns:
tuple: A (indexes, success)-tuple, which indicates identified objects
by applying the filter and if the operation was successful in
general.
"""
        # There should be a proper grammar and a lexer/parser here
        # instead of this quick-and-dirty implementation.
safeEnvDict = {
'freeSearch': self.freeSearch,
'extentSearch': self.extentSearch,
'indexSearch': self.indexSearch
}
for col in self._dataFrame.columns:
safeEnvDict[col] = self._dataFrame[col]
try:
searchIndex = eval(self._filterString, {'__builtins__': None}, safeEnvDict)
except NameError as err:
return [], False
except SyntaxError as err:
return [], False
except ValueError as err:
            # 'and'/'or' are not valid here; the bitwise operators & and | must be used.
return [], False
except TypeError as err:
# argument must be string or compiled pattern
return [], False
return searchIndex, True
def freeSearch(self, searchString):
"""Execute a free text search for all columns in the dataframe.
Args:
searchString (str): Any string which may be contained in any column.
Returns:
list: A list containing all indexes with filtered data. Matches will
be `True`, the remaining items will be `False`. If the dataFrame
is empty, an empty list will be returned.
"""
if not self._dataFrame.empty:
            # initialise `question` as an all-False boolean index over the dataframe.
question = self._dataFrame.index == -9999
for column in self._dataFrame.columns:
dfColumn = self._dataFrame[column]
dfColumn = dfColumn.apply(unicode)
question2 = dfColumn.str.contains(searchString, flags=re.IGNORECASE, regex=True, na=False)
question = np.logical_or(question, question2)
return question
else:
return []
def extentSearch(self, xmin, ymin, xmax, ymax):
"""Filters the data by a geographical bounding box.
The bounding box is given as lower left point coordinates and upper
right point coordinates.
Note:
It's necessary that the dataframe has a `lat` and `lng` column
in order to apply the filter.
Check if the method could be removed in the future. (could be done
via freeSearch)
Returns:
list: A list containing all indexes with filtered data. Matches will
be `True`, the remaining items will be `False`. If the dataFrame
is empty, an empty list will be returned.
"""
if not self._dataFrame.empty:
try:
questionMin = (self._dataFrame.lat >= xmin) & (self._dataFrame.lng >= ymin)
questionMax = (self._dataFrame.lat <= xmax) & (self._dataFrame.lng <= ymax)
return np.logical_and(questionMin, questionMax)
except AttributeError:
return []
else:
return []
def indexSearch(self, indexes):
"""Filters the data by a list of indexes.
Args:
indexes (list of int): List of index numbers to return.
Returns:
list: A list containing all indexes with filtered data. Matches will
be `True`, the remaining items will be `False`. If the dataFrame
is empty, an empty list will be returned.
"""
if not self._dataFrame.empty:
filter0 = self._dataFrame.index == -9999
for index in indexes:
filter1 = self._dataFrame.index == index
filter0 = np.logical_or(filter0, filter1)
return filter0
else:
return []
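# Illustrative sketch (not part of the original module): applying a filter
# expression to a small DataFrame. The column names and expression below are
# hypothetical.
def _exampleSearch():
    df = pd.DataFrame({'lat': [1.0, 5.0, 9.0], 'lng': [2.0, 6.0, 10.0]})
    dataSearch = DataSearch('demo', dataFrame=df)
    dataSearch.setFilterString('(lat > 2) & (lng < 10)')
    # `matches` is a boolean index over df; `ok` is False if evaluation failed.
    matches, ok = dataSearch.search()
    return df[matches], ok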
|
mit
|
adammenges/statsmodels
|
docs/source/plots/graphics_gofplots_qqplot.py
|
38
|
1911
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012
Author: Josef Perktold
edited by: Paul Hobson (2012-08-19)
"""
from scipy import stats
from matplotlib import pyplot as plt
import statsmodels.api as sm
#example from docstring
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8 #x coordinate for text insert
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# example with the new ProbPlot class
import numpy as np
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
|
bsd-3-clause
|
potash/scikit-learn
|
examples/linear_model/plot_iris_logistic.py
|
119
|
1679
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Fit the logistic-regression classifier to the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
ZENGXH/scikit-learn
|
sklearn/feature_selection/variance_threshold.py
|
238
|
2594
|
# Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
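# Illustrative sketch (not part of the original module): removing near-constant
# features with a non-zero threshold. The toy matrix below is hypothetical; only
# the middle column has variance above the threshold and survives.
def _example_variance_threshold():
    X = [[0.0, 1.0, 0.1],
         [0.0, 3.0, 0.2],
         [0.1, 5.0, 0.1]]
    selector = VarianceThreshold(threshold=0.05)
    X_reduced = selector.fit_transform(X)
    return X_reduced, selector.variances_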
|
bsd-3-clause
|
laosiaudi/tensorflow
|
tensorflow/examples/learn/text_classification.py
|
8
|
4925
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
features = tf.contrib.layers.bow_encoder(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return (
{'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
loss, train_op)
def rnn_model(features, target):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
# Split into list of embedding per word, while removing doc length dim.
  # word_list is a list of tensors with shape [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.nn.rnn(cell, word_list, dtype=tf.float32)
  # Given the encoding from the RNN, take the encoding of the last step (i.e.
  # the final hidden state of the network) and pass it as features to a
  # logistic regression over the output classes.
target = tf.one_hot(target, 15, 1, 0)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return (
{'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)},
loss, train_op)
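# Illustrative sketch (not part of the original example): how the vocabulary
# processor used in main() maps raw strings onto fixed-length id matrices.
# The sample sentences are hypothetical.
def _demo_vocab_processor():
  processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
  docs = ['the cat sat on the mat', 'the dog barked']
  ids = np.array(list(processor.fit_transform(docs)))
  # Each row holds MAX_DOCUMENT_LENGTH word ids, zero-padded on the right.
  return ids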
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
model_fn = bag_of_words_model
classifier = learn.Estimator(model_fn=model_fn)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true'
)
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
a-doumoulakis/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/debug_test.py
|
46
|
32817
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Debug estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import operator
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import debug
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
NUM_EXAMPLES = 100
N_CLASSES = 5 # Cardinality of multiclass labels.
LABEL_DIMENSION = 3 # Dimensionality of regression labels.
def _train_test_split(features_and_labels):
features, labels = features_and_labels
train_set = (features[:int(len(features) / 2)], labels[:int(len(features) / 2)])
test_set = (features[int(len(features) / 2):], labels[int(len(features) / 2):])
return train_set, test_set
def _input_fn_builder(features, labels):
def input_fn():
feature_dict = {'features': constant_op.constant(features)}
my_labels = labels
if my_labels is not None:
my_labels = constant_op.constant(my_labels)
return feature_dict, my_labels
return input_fn
class DebugClassifierTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.labels = np.random.choice(
range(N_CLASSES), p=[0.1, 0.3, 0.4, 0.1, 0.1], size=NUM_EXAMPLES)
self.binary_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
self.binary_float_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
def testPredict(self):
"""Tests that DebugClassifier outputs the majority class."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictBinary(self):
"""Same as above for binary predictions."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
(train_features, train_labels), (
test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
majority_class, _ = max(collections.Counter(train_labels).items(),
key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_classes(input_fn=_input_fn_builder(test_features,
None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictProba(self):
"""Tests that DebugClassifier outputs observed class distribution."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
class_distribution = np.zeros((1, N_CLASSES))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testPredictProbaBinary(self):
"""Same as above but for binary classification."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
(train_features, train_labels), (
test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, int(label)] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_builder(train_features, train_labels),
steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugClassifier(n_classes=3),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
classifier = debug.DebugClassifier(config=run_config.RunConfig(
tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
classifier = debug.DebugClassifier(n_classes=3)
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_StringLabel(self):
"""Tests multi-class classification with string labels."""
def _input_fn_train():
labels = constant_op.constant([['foo'], ['bar'], ['baz'], ['bar']])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(
n_classes=3, label_keys=['foo', 'bar', 'baz'])
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(
weight_column_name='w',
n_classes=2,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(weight_column_name='w')
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict_classes(input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
model_dir = tempfile.mkdtemp()
classifier = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = debug.DebugClassifier(config=run_config.RunConfig(
tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=5)
def default_input_fn(unused_estimator, examples):
return feature_column_ops.parse_feature_columns_from_examples(
examples, feature_columns)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir, input_fn=default_input_fn)
class DebugRegressorTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.targets = np.random.rand(NUM_EXAMPLES, LABEL_DIMENSION)
def testPredictScores(self):
"""Tests that DebugRegressor outputs the mean target."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.targets])
mean_target = np.mean(train_labels, 0)
expected_prediction = np.vstack(
[mean_target for _ in range(test_labels.shape[0])])
classifier = debug.DebugRegressor(label_dimension=LABEL_DIMENSION)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_scores(input_fn=_input_fn_builder(test_features,
None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugRegressor(),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
model_dir = tempfile.mkdtemp()
regressor = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
Lawrence-Liu/scikit-learn
|
examples/feature_stacker.py
|
246
|
1906
|
"""
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way to high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features where good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
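# Hedged addendum (not part of the original example): GridSearchCV refits the
# best pipeline on the full data, so the fitted FeatureUnion can be pulled out
# of best_estimator_ via named_steps and reused on its own, for instance to
# check the dimensionality of the stacked feature space.
best_union = grid_search.best_estimator_.named_steps["features"]
print("Stacked feature space dimensionality:", best_union.transform(X).shape[1])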
|
bsd-3-clause
|
charanpald/wallhack
|
wallhack/modelselect/RealDataTreeExp3.py
|
1
|
4315
|
"""
Test how the penalty varies for a fixed gamma as the number of examples changes.
"""
import logging
import numpy
import sys
import multiprocessing
from sandbox.util.PathDefaults import PathDefaults
from exp.modelselect.ModelSelectUtils import ModelSelectUtils
from sandbox.util.Sampling import Sampling
from exp.sandbox.predictors.DecisionTreeLearner import DecisionTreeLearner
import matplotlib.pyplot as plt
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.seterr(all="raise")
numpy.random.seed(21)
dataDir = PathDefaults.getDataDir()
dataDir += "modelPenalisation/regression/"
figInd = 0
loadMethod = ModelSelectUtils.loadRegressDataset
datasets = ModelSelectUtils.getRegressionDatasets(True)
sampleSizes = numpy.arange(50, 250, 25)
#datasets = [datasets[1]]
for datasetName, numRealisations in datasets:
logging.debug("Dataset " + datasetName)
meanErrors = numpy.zeros(sampleSizes.shape[0])
meanPenalties = numpy.zeros(sampleSizes.shape[0])
meanIdealPenalities = numpy.zeros(sampleSizes.shape[0])
k = 0
for sampleSize in sampleSizes:
logging.debug("Sample size " + str(sampleSize))
errors = numpy.zeros(numRealisations)
sampleMethod = Sampling.crossValidation
        #Setting maxDepth = 50 and minSplit = 5 doesn't affect results
numProcesses = multiprocessing.cpu_count()
learner = DecisionTreeLearner(criterion="mse", maxDepth=100, minSplit=1, pruneType="CART", processes=numProcesses)
learner.setChunkSize(3)
paramDict = {}
paramDict["setGamma"] = numpy.array([31], dtype=numpy.int)
numParams = paramDict["setGamma"].shape[0]
alpha = 1.0
folds = 4
numRealisations = 10
Cvs = numpy.array([folds-1])*alpha
meanAllErrors = numpy.zeros(numParams)
meanTrainError = numpy.zeros(numParams)
treeSizes = numpy.zeros(numParams)
treeDepths = numpy.zeros(numParams)
treeLeaveSizes = numpy.zeros(numParams)
for j in range(numRealisations):
print("")
logging.debug("j=" + str(j))
trainX, trainY, testX, testY = loadMethod(dataDir, datasetName, j)
logging.debug("Loaded dataset with " + str(trainX.shape) + " train and " + str(testX.shape) + " test examples")
trainInds = numpy.random.permutation(trainX.shape[0])[0:sampleSize]
trainX = trainX[trainInds,:]
trainY = trainY[trainInds]
idx = sampleMethod(folds, trainX.shape[0])
#Now try penalisation
resultsList = learner.parallelPen(trainX, trainY, idx, paramDict, Cvs)
bestLearner, trainErrors, currentPenalties = resultsList[0]
meanPenalties[k] += currentPenalties
meanTrainError += trainErrors
predY = bestLearner.predict(testX)
meanErrors[k] += bestLearner.getMetricMethod()(testY, predY)
#Compute ideal penalties and error on training data
meanIdealPenalities[k] += learner.parallelPenaltyGrid(trainX, trainY, testX, testY, paramDict)
for i in range(len(paramDict["setGamma"])):
allError = 0
learner.setGamma(paramDict["setGamma"][i])
for trainInds, testInds in idx:
validX = trainX[trainInds, :]
validY = trainY[trainInds]
learner.learnModel(validX, validY)
predY = learner.predict(trainX)
allError += learner.getMetricMethod()(predY, trainY)
meanAllErrors[i] += allError/float(len(idx))
k+= 1
numRealisations = float(numRealisations)
meanErrors /= numRealisations
meanPenalties /= numRealisations
meanIdealPenalities /= numRealisations
print(meanErrors)
plt.plot(sampleSizes, meanPenalties*numpy.sqrt(sampleSizes), label="Penalty")
plt.plot(sampleSizes, meanIdealPenalities*numpy.sqrt(sampleSizes), label="Ideal penalty")
plt.xlabel("Sample sizes")
plt.ylabel("Penalty")
plt.legend()
plt.show()
|
gpl-3.0
|
MaxParsons/amo-physics
|
liexperiment/raman/coherent_population_transfer.py
|
1
|
4756
|
'''
Created on Feb 18, 2015
@author: Max
'''
import numpy as np
import numpy.matlib
from scipy.integrate import ode
import matplotlib.pyplot as plt
from itertools import product
class RamanTransition(object):
def __init__(self):
self.n_vibrational = 5
self.trap_frequency = 0.5e6
self.anharmonicity = 26.0e3
self.lamb_dicke = 0.28
self.initial_state = np.zeros(2 * self.n_vibrational, dtype="complex64")
self.initial_state[0] = 1.0 / np.sqrt(2.0)
self.initial_state[1] = 1.0 / np.sqrt(2.0)
self.constant_rabi = 500.0e3
self.constant_detuning = -500.0e3
self.simulation_duration = 10.0 / self.constant_rabi
self.simulation_nsteps = 500.0
# simulation results
self.pops = None
self.pops_ground = None
self.pops_excited = None
self.nbars = None
self.wavefunctions = None
self.times = None
def trap_energies(self, n):
return 2.0 * np.pi * (n * self.trap_frequency - 0.5 * (n - 1) * n * self.anharmonicity)
def detuning(self, t):
return 2.0 * np.pi * self.constant_detuning
def rabi(self, t):
return 2.0 * np.pi * self.constant_rabi
# def nfactor(self, m, n):
# if m == n:
# return 1.0
# elif m > n:
# facs = np.arange(m, n)
# return np.product(np.sqrt(facs))
# elif m < n:
# facs = np.arange(m, n)
# return np.product(np.sqrt(facs + 1))
def hamiltonian(self, t):
# ham0 = numpy.matlib.zeros((2 * self.n_vibrational, 2 * self.n_vibrational), dtype="complex64")
# ham1 = numpy.matlib.zeros((2 * self.n_vibrational, 2 * self.n_vibrational), dtype="complex64")
ham0 = np.diag(self.trap_energies(self._vibrational_numbers) - self.detuning(t) * self._internal_numbers)
internal_coupling = np.logical_not(np.equal(self._electronic_outer_right, self._electronic_outer_left)) + 0
lamb_dicke = self.lamb_dicke ** np.abs(self._vibrational_outer_right - self._vibrational_outer_left)
energy_difference = self.trap_energies(self._vibrational_outer_right) - self.trap_energies(self._vibrational_outer_left)
exp_factor = np.exp(-1.0j * (self.detuning(t) - energy_difference))
rtn_factors = 1.0
ham1 = internal_coupling * 0.5 * lamb_dicke * self.rabi(t) * rtn_factors * exp_factor
# for m in range(0, self.n_vibrational):
# for n in range(self.n_vibrational, 2 * self.n_vibrational):
# ham1[m, n] = 0.5 * self.lamb_dicke ** np.abs((n - self.n_vibrational) - m) * self.rabi(t) * \
# np.exp(-1.0j * (self.detuning(t) - (self.trap_energies(n - self.n_vibrational) - self.trap_energies(m))) * t)
return np.matrix(ham0 + ham1, dtype="complex64")
# def hamiltonian(self, t):
# ham0 = numpy.matlib.zeros((2 * self.n_vibrational, 2 * self.n_vibrational), dtype="complex64")
# ham1 = numpy.matlib.zeros((2 * self.n_vibrational, 2 * self.n_vibrational), dtype="complex64")
def _rhs(self, t, y):
return 1.0j * np.dot(self.hamiltonian(t), y)
def compute_quantum_numbers(self):
self._vibrational_numbers = np.array(range(0, self.n_vibrational) + range(0, self.n_vibrational))
self._internal_numbers = np.array([0] * (self.n_vibrational) + [1] * (self.n_vibrational))
self._vibrational_outer_right, self._vibrational_outer_left = np.meshgrid(self._vibrational_numbers, self._vibrational_numbers)
self._electronic_outer_right, self._electronic_outer_left = \
np.meshgrid(self._internal_numbers, self._internal_numbers)
def compute_dynamics(self):
        # useful arrays for the vectorized hamiltonian construction
self.compute_quantum_numbers()
# do integration
r = ode(self._rhs).set_integrator('zvode')
r.set_initial_value(self.initial_state, 0.0)
t1 = self.simulation_duration
dt = t1 / self.simulation_nsteps
ts = []
ts.append(0.0)
ys = []
ys.append(self.initial_state)
while r.successful() and r.t < t1:
r.integrate(r.t + dt)
ts.append(r.t)
ys.append(r.y)
self.times = np.array(ts)
self.wavefunctions = np.array(ys)
self.pops = np.abs(ys) ** 2
self.pops_ground = np.sum(self.pops[:, 0:self.n_vibrational - 1], axis=1)
self.pops_excited = np.sum(self.pops[:, self.n_vibrational:-1], axis=1)
vib_states = np.append(np.arange(0, self.n_vibrational), np.arange(0, self.n_vibrational))
self.nbars = np.sum(self.pops * vib_states, axis=1)
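# Hedged usage sketch (not part of the original module): build the default
# Raman transition model, integrate the dynamics, and plot the resulting
# ground- and excited-manifold populations over time.
if __name__ == "__main__":
    transition = RamanTransition()
    transition.compute_dynamics()
    plt.plot(transition.times, transition.pops_ground, label="ground manifold")
    plt.plot(transition.times, transition.pops_excited, label="excited manifold")
    plt.xlabel("time (s)")
    plt.ylabel("population")
    plt.legend()
    plt.show()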
|
mit
|
gef756/statsmodels
|
statsmodels/sandbox/distributions/examples/matchdist.py
|
33
|
9822
|
'''given a 1D sample of observations, find a matching distribution
* estimate maximum likelihood parameters for each distribution
* rank estimated distributions by Kolmogorov-Smirnov and Anderson-Darling
test statistics
Author: Josef Pktd
License: Simplified BSD
original December 2008
TODO:
* refactor to result class
* split estimation by support, add option and choose automatically
*
'''
from __future__ import print_function
from scipy import stats
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
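# Hedged illustration (added, not part of the original script): the core
# matching step used below is an MLE fit of a candidate distribution followed
# by a Kolmogorov-Smirnov test on the standardized sample.
_demo_rvs = stats.t.rvs(10, scale=10, size=500)
_demo_loc, _demo_scale = stats.norm.fit(_demo_rvs)
print('demo kstest vs norm:',
      stats.kstest((_demo_rvs - _demo_loc) / _demo_scale, 'norm'))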
#stats.distributions.beta_gen._fitstart = lambda self, data : (5,5,0,1)
def plothist(x,distfn, args, loc, scale, right=1):
plt.figure()
# the histogram of the data
n, bins, patches = plt.hist(x, 25, normed=1, facecolor='green', alpha=0.75)
maxheight = max([p.get_height() for p in patches])
print(maxheight)
axlim = list(plt.axis())
#print(axlim)
axlim[-1] = maxheight*1.05
#plt.axis(tuple(axlim))
## print(bins)
## print('args in plothist', args)
# add a 'best fit' line
#yt = stats.norm.pdf( bins, loc=loc, scale=scale)
yt = distfn.pdf( bins, loc=loc, scale=scale, *args)
yt[yt>maxheight]=maxheight
lt = plt.plot(bins, yt, 'r--', linewidth=1)
ys = stats.t.pdf( bins, 10,scale=10,)*right
ls = plt.plot(bins, ys, 'b-', linewidth=1)
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title(r'$\mathrm{Testing: %s :}\ \mu=%f,\ \sigma=%f$'%(distfn.name,loc,scale))
#plt.axis([bins[0], bins[-1], 0, 0.134+0.05])
plt.grid(True)
plt.draw()
#plt.show()
#plt.close()
#targetdist = ['norm','t','truncnorm','johnsonsu','johnsonsb',
targetdist = ['norm','alpha', 'anglit', 'arcsine',
'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
'gausshyper', 'invgamma', 'invnorm', 'invweibull', 'johnsonsb',
'johnsonsu', 'laplace', 'levy', 'levy_l',
'logistic', 'loggamma', 'loglaplace', 'lognorm', 'gilbrat',
'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
'nct', 'pareto', 'lomax', 'powerlaw', 'powerlognorm', 'powernorm',
'rdist', 'rayleigh', 'reciprocal', 'rice', 'recipinvgauss',
'semicircular', 'triang', 'truncexpon', 'truncnorm',
'tukeylambda', 'uniform', 'vonmises', 'wald', 'wrapcauchy',
'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom', 'logser',
'poisson', 'planck', 'boltzmann', 'randint', 'zipf', 'dlaplace']
left = []
right = []
finite = []
unbound = []
other = []
contdist = []
discrete = []
categ = {('open','open'):'unbound', ('0','open'):'right',('open','0',):'left',
('finite','finite'):'finite',('oth','oth'):'other'}
categ = {('open','open'):unbound, ('0','open'):right,('open','0',):left,
('finite','finite'):finite,('oth','oth'):other}
categ2 = {
('open', '0') : ['frechet_l', 'weibull_max', 'levy_l'],
('finite', 'finite') : ['anglit', 'cosine', 'rdist', 'semicircular'],
('0', 'open') : ['alpha', 'burr', 'fisk', 'chi', 'chi2', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy', 'f',
'foldnorm', 'frechet_r', 'weibull_min', 'genpareto', 'genexpon',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'halfcauchy',
'halflogistic', 'halfnorm', 'invgamma', 'invnorm', 'invweibull',
'levy', 'loglaplace', 'lognorm', 'gilbrat', 'maxwell', 'mielke',
'nakagami', 'ncx2', 'ncf', 'lomax', 'powerlognorm', 'rayleigh',
'rice', 'recipinvgauss', 'truncexpon', 'wald'],
('open', 'open') : ['cauchy', 'dgamma', 'dweibull', 'genlogistic', 'genextreme',
'gumbel_r', 'gumbel_l', 'hypsecant', 'johnsonsu', 'laplace',
'logistic', 'loggamma', 't', 'nct', 'powernorm', 'reciprocal',
'truncnorm', 'tukeylambda', 'vonmises'],
('0', 'finite') : ['arcsine', 'beta', 'betaprime', 'bradford', 'gausshyper',
'johnsonsb', 'powerlaw', 'triang', 'uniform', 'wrapcauchy'],
('finite', 'open') : ['pareto']
}
#Note: weibull_max == frechet_l
right_incorrect = ['genextreme']
right_all = categ2[('0', 'open')] + categ2[('0', 'finite')] + categ2[('finite', 'open')]\
+ right_incorrect
for distname in targetdist:
distfn = getattr(stats,distname)
if hasattr(distfn,'_pdf'):
if np.isinf(distfn.a):
low = 'open'
elif distfn.a == 0:
low = '0'
else:
low = 'finite'
if np.isinf(distfn.b):
high = 'open'
elif distfn.b == 0:
high = '0'
else:
high = 'finite'
contdist.append(distname)
categ.setdefault((low,high),[]).append(distname)
not_good = ['genextreme', 'reciprocal', 'vonmises']
# 'genextreme' is right (or left?), 'reciprocal' requires 0<a<b, 'vonmises' no a,b
targetdist = [f for f in categ[('open', 'open')] if not f in not_good]
not_good = ['wrapcauchy']
not_good = ['vonmises']
not_good = ['genexpon','vonmises']
#'wrapcauchy' requires additional parameter (scale) in argcheck
targetdist = [f for f in contdist if not f in not_good]
#targetdist = contdist
#targetdist = not_good
#targetdist = ['t', 'f']
#targetdist = ['norm','burr']
if __name__ == '__main__':
#TODO: calculate correct tail probability for mixture
prefix = 'run_conv500_1_'
convol = 0.75
n = 500
dgp_arg = 10
dgp_scale = 10
results = []
for i in range(1):
rvs_orig = stats.t.rvs(dgp_arg,scale=dgp_scale,size=n*convol)
rvs_orig = np.hstack((rvs_orig,stats.halflogistic.rvs(loc=0.4, scale=5.0,size =n*(1-convol))))
rvs_abs = np.absolute(rvs_orig)
rvs_pos = rvs_orig[rvs_orig>0]
rightfactor = 1
rvs_right = rvs_pos
print('='*50)
print('samplesize = ', n)
for distname in targetdist:
distfn = getattr(stats,distname)
if distname in right_all:
rvs = rvs_right
rind = rightfactor
else:
rvs = rvs_orig
rind = 1
print('-'*30)
print('target = %s' % distname)
sm = rvs.mean()
sstd = np.sqrt(rvs.var())
ssupp = (rvs.min(), rvs.max())
if distname in ['truncnorm','betaprime','reciprocal']:
par0 = (sm-2*sstd,sm+2*sstd)
par_est = tuple(distfn.fit(rvs,loc=sm,scale=sstd,*par0))
elif distname == 'norm':
par_est = tuple(distfn.fit(rvs,loc=sm,scale=sstd))
elif distname == 'genextreme':
par_est = tuple(distfn.fit(rvs,-5,loc=sm,scale=sstd))
elif distname == 'wrapcauchy':
par_est = tuple(distfn.fit(rvs,0.5,loc=0,scale=sstd))
elif distname == 'f':\
par_est = tuple(distfn.fit(rvs,10,15,loc=0,scale=1))
elif distname in right:
sm = rvs.mean()
sstd = np.sqrt(rvs.var())
par_est = tuple(distfn.fit(rvs,loc=0,scale=1))
else:
sm = rvs.mean()
sstd = np.sqrt(rvs.var())
par_est = tuple(distfn.fit(rvs,loc=sm,scale=sstd))
print('fit', par_est)
arg_est = par_est[:-2]
loc_est = par_est[-2]
scale_est = par_est[-1]
rvs_normed = (rvs-loc_est)/scale_est
ks_stat, ks_pval = stats.kstest(rvs_normed,distname, arg_est)
print('kstest', ks_stat, ks_pval)
quant = 0.1
crit = distfn.ppf(1-quant*float(rind), loc=loc_est, scale=scale_est,*par_est)
tail_prob = stats.t.sf(crit,dgp_arg,scale=dgp_scale)
print('crit, prob', quant, crit, tail_prob)
#if distname == 'norm':
#plothist(rvs,loc_est,scale_est)
#args = tuple()
results.append([distname,ks_stat, ks_pval,arg_est,loc_est,scale_est,crit,tail_prob ])
#plothist(rvs,distfn,arg_est,loc_est,scale_est)
#plothist(rvs,distfn,arg_est,loc_est,scale_est)
#plt.show()
#plt.close()
#TODO: collect results and compare tail quantiles
from operator import itemgetter
res_sort = sorted(results, key = itemgetter(2))
res_sort.reverse() #kstest statistic: smaller is better, pval larger is better
print('number of distributions', len(res_sort))
imagedir = 'matchresults'
import os
if not os.path.exists(imagedir):
os.makedirs(imagedir)
for ii,di in enumerate(res_sort):
distname,ks_stat, ks_pval,arg_est,loc_est,scale_est,crit,tail_prob = di[:]
distfn = getattr(stats,distname)
if distname in right_all:
rvs = rvs_right
rind = rightfactor
ri = 'r'
else:
rvs = rvs_orig
ri = ''
rind = 1
print('%s ks-stat = %f, ks-pval = %f tail_prob = %f)' % \
(distname, ks_stat, ks_pval, tail_prob))
## print('arg_est = %s, loc_est = %f scale_est = %f)' % \
## (repr(arg_est),loc_est,scale_est))
plothist(rvs,distfn,arg_est,loc_est,scale_est,right = rind)
plt.savefig(os.path.join(imagedir,'%s%s%02d_%s.png'% (prefix, ri,ii, distname)))
##plt.show()
##plt.close()
|
bsd-3-clause
|
plesager/barakuda
|
python/exec/movie_square_zoom_IFS.py
|
2
|
5894
|
#!/usr/bin/env python
# B a r a K u d a
#
# Prepare 2D maps (monthly) that will later become a GIF animation!
# NEMO output and observations needed
#
# L. Brodeau, november 2016
import sys
import os
import numpy as nmp
from netCDF4 import Dataset
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import datetime
import barakuda_colmap as bcm
import barakuda_tool as bt
year_ref_ini = 1990
#CTATM = 'T255'
CTATM = 'T1279'
if CTATM == 'T255':
# South Greenland:
#i1 = 412; i2 =486
#j1 = 22 ; j2 = 56
# NAtl:
i1 = 385 ; i2= 540
j1 = 6 ; j2 = 84
#Global T255:
#i1 = 0 ; i2 =511
#j1 = 0 ; j2 = 255
elif CTATM == 'T1279':
#
#Global:
i1 = 0 ; i2 = 2559+1
j1 = 0 ; j2 = 1279+1
# Natl:
##i1 = 1849 ; i2 = 2525
##j1 = 97 ; j2 = 508
##i1 = 1960 ; i2 = 2550; #2680
##i1 = 1849 ; i2 = 2525
##j1 = 97 ; j2 = 508
#i1 = 2000 ; i2 = 2590
#j1 = 0 ; j2 = 519
else:
    print 'UNKNOWN ATMOSPHERE RESOLUTION!'; sys.exit(0)
fig_type='png'
narg = len(sys.argv)
if narg < 4: print 'Usage: '+sys.argv[0]+' <file> <variable> <LSM_file>'; sys.exit(0)
cf_in = sys.argv[1] ; cv_in=sys.argv[2] ; cf_lsm=sys.argv[3]
lt2m = False ; lsst = False ; lshf = False
if cv_in == 'T2M': lt2m = True
if cv_in == 'SSTK': lsst = True
if cv_in == 'SNHF': lshf = True
if lt2m:
#tmin=-16. ; tmax=28. ; dt = 1.
tmin=-2. ; tmax=28. ; dt = 1.
cpal = 'ncview_nrl'
#cpal = 'jaisnd'
#cpal = '3gauss'
#cpal = 'rainbow2_cmyk'
#cpal = 'rainbow'
#cpal = 'rnb2'
#cpal = 'jaisnc'
#cpal = 'jaisnb'
cfield = 'T2M'
cunit = r'$^{\circ}C$'
cb_jump = 2
if lsst:
tmin=-20. ; tmax=12. ; dt = 1.
cpal = 'sstnw'
cfield = 'SST'
cunit = r'$Boo$'
cb_jump = 2
if lshf:
tmin=-1200. ; tmax=400. ; dt = 25.
#cpal = 'rainbow'
cpal = 'ncview_nrl'
cfield = 'Net Heat Flux'
cunit = r'$W/m^2$'
cb_jump = 4
clsm = 'LSM'
# Need to know dimension:
bt.chck4f(cf_lsm)
id_lsm = Dataset(cf_lsm)
vlon = id_lsm.variables['lon'][:]
vlat = id_lsm.variables['lat'][:]
id_lsm.close()
Ni0 = len(vlon)
Nj0 = len(vlat)
print '\n Dimension of global domain:', Ni0, Nj0
imax=Ni0+1
Ni = i2-i1
Nj = j2-j1
LSM = nmp.zeros((Nj,Ni), dtype=nmp.float)
XIN = nmp.zeros((Nj,Ni))
id_lsm = Dataset(cf_lsm)
if i2 >= imax:
print ' i2 > imax !!! => ', i2, '>', imax
Xall = id_lsm.variables[clsm][0,j1:j2,:]
LSM[:,0:imax-i1] = Xall[:,i1-1:imax]
ii=imax-i1
LSM[:,ii:Ni] = Xall[:,0:i2-imax]
del Xall
else:
LSM[:,:] = id_lsm.variables[clsm][0,j1:j2,i1:i2]
id_lsm.close()
[ nj , ni ] = nmp.shape(LSM)
idx_ocean = nmp.where(LSM[:,:] < 0.5)
LSM[idx_ocean] = nmp.nan
LSM = nmp.flipud(LSM)
params = { 'font.family':'Ubuntu',
'font.size': int(12),
'legend.fontsize': int(12),
'xtick.labelsize': int(12),
'ytick.labelsize': int(12),
'axes.labelsize': int(12) }
mpl.rcParams.update(params)
cfont_clb = { 'fontname':'Arial', 'fontweight':'normal', 'fontsize':13 }
cfont_title = { 'fontname':'Ubuntu Mono', 'fontweight':'normal', 'fontsize':18 }
cfont_mail = { 'fontname':'Times New Roman', 'fontweight':'normal', 'fontstyle':'italic', 'fontsize':9, 'color':'0.5' }
# Pal_Sst:
pal_fld = bcm.chose_colmap(cpal)
norm_fld = colors.Normalize(vmin = tmin, vmax = tmax, clip = False)
pal_lsm = bcm.chose_colmap('blk')
norm_lsm = colors.Normalize(vmin = 0, vmax = 1, clip = False)
vc_fld = nmp.arange(tmin, tmax + dt, dt)
pfin = nmp.zeros((nj,ni))
bt.chck4f(cf_in)
id_in = Dataset(cf_in)
vtime = id_in.variables['time'][:]
id_in.close()
del id_in
Nt = len(vtime)
# Size of the figure:
rat_Nj_Ni = float(Nj)/float(Ni) + 0.12
rh = 7.5
rw = rh/rat_Nj_Ni
FSZ = ( rw , rh )
rcorr = rat_Nj_Ni/(float(Nj0)/float(Ni0))
print ' rcorr => ', rcorr
for jt in range(Nt):
print '\n *** Reading record # '+str(jt+1)+' of '+cv_in+' in '+cf_in
id_in = Dataset(cf_in)
if i2 >= imax:
print ' i2 = ', i2
Xall = id_in.variables[cv_in][jt,j1:j2,:]
XIN[:,0:imax-i1] = Xall[:,i1-1:imax]
ii=imax-i1
XIN[:,ii:Ni] = Xall[:,0:i2-imax]
del Xall
else:
XIN[:,:] = id_in.variables[cv_in][jt,j1:j2,i1:i2]
id_in.close()
del id_in
if lsst or lt2m: XIN[:,:] = XIN[:,:] - 273.15
ct = '%3.3i'%(jt+1)
cd = str(datetime.datetime.strptime(str(year_ref_ini)+' '+ct, '%Y %j'))
cdate = cd[:10] ; print ' *** cdate :', cdate
cfig = 'figs/'+cv_in+'_IFS'+'_d'+ct+'.'+fig_type
fig = plt.figure(num = 1, figsize=FSZ, dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes([0.055, 0.05, 0.9, 1.], axisbg = 'k')
cf = plt.imshow(nmp.flipud(XIN), cmap = pal_fld, norm = norm_fld)
plt.axis([ 0, ni, 0, nj])
# Mask
print ' LSM stuff...'
cm = plt.imshow(LSM, cmap = pal_lsm, norm = norm_lsm)
plt.title('IFS: '+cfield+', coupled ORCA12-'+CTATM+', '+cdate, **cfont_title)
ax2 = plt.axes([0.04, 0.08, 0.93, 0.025])
clb = mpl.colorbar.ColorbarBase(ax2, ticks=vc_fld, cmap=pal_fld, norm=norm_fld, orientation='horizontal', extend='both')
#clb = plt.colorbar(cf, ticks=vc_fld, orientation='horizontal', drawedges=False, pad=0.07, shrink=1., aspect=40)
cb_labs = [] ; cpt = 0
for rr in vc_fld:
if cpt % cb_jump == 0:
cb_labs.append(str(int(rr)))
else:
cb_labs.append(' ')
cpt = cpt + 1
clb.ax.set_xticklabels(cb_labs)
clb.set_label(cunit, **cfont_clb)
del cf
ax.annotate('[email protected]', xy=(1, 4), xytext=(480, -85), **cfont_mail)
plt.savefig(cfig, dpi=160, orientation='portrait', transparent=False)
print cfig+' created!\n'
plt.close(1)
del fig, ax, clb, cm
|
gpl-2.0
|
pyspace/test
|
pySPACE/missions/nodes/visualization/average_and_feature_vis.py
|
1
|
40990
|
""" Visualize average of :mod:`time series <pySPACE.resources.data_types.time_series>` and time domain features
Additional features can be added to the visualization.
"""
import os
import cPickle
import pylab
import matplotlib.font_manager
from matplotlib import colors
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.resources.data_types.time_series import TimeSeries
from pySPACE.resources.dataset_defs.stream import StreamDataset
from pySPACE.tools.filesystem import create_directory
def convert_feature_vector_to_time_series(feature_vector, sample_data):
""" Parse the feature name and reconstruct a time series object holding the equivalent data
In a feature vector object, a feature is determined by the feature
name and the feature value. When dealing with time domain features, the
feature name is a concatenation of the (pseudo-) channel
name and the time within an epoch in seconds. A typical feature name
reads, e.g., "TD_F7_0.960sec".
"""
# channel name is what comes after the first underscore
feat_channel_names = [chnames.split('_')[1]
for chnames in
feature_vector.feature_names]
# time is what comes after the second underscore
feat_times = [int(float((chnames.split('_')[2])[:-3])
* sample_data.sampling_frequency)
for chnames in feature_vector.feature_names]
# generate new time series object based on the exemplary "sample_data"
# all filled with zeros instead of data
new_data = TimeSeries(pylab.zeros(sample_data.shape),
channel_names=sample_data.channel_names,
sampling_frequency=sample_data.sampling_frequency,
start_time=sample_data.start_time,
end_time=sample_data.end_time,
name=sample_data.name,
marker_name=sample_data.marker_name)
# try to find the correct place (channel name and time)
# to insert the feature values
for i in range(len(feature_vector)):
try:
new_data[feat_times[i],
new_data.channel_names.index(feat_channel_names[i])] = \
feature_vector[i]
except ValueError:
import warnings
warnings.warn("\n\nFeatureVis can't find equivalent to Feature "+
feature_vector.feature_names[i] +
" in the time series.\n")
return new_data
class AverageFeatureVisNode(BaseNode):
""" Visualize time domain features in the context of average time series.
This node is supposed to visualize features from any feature
    selection algorithm in the context of the training data. This data is some kind of
time series, either channelwise "plain" EEG time series or somehow
preprocessed data, e.g. the time series of CSP pseudo channels.
The purpose is to investigate two main issues:
1. By comparing the mean time series of standard and target time windows,
is it understandable why certain features have been selected?
2. Comparing the time series from one set to the selected features from
some other set, are the main features robust?
If no features are passed to this node, it will still visualize average
time series in any case. Only the time series that are labeled as training
data will be taken into account. The reason is that the primary aim of
this node is to visualize the features on the very data they were chosen
from, i.e., the training data. If instead all data is to be plotted (e.g.,
at the end of a preprocessing flow), one would in the worst case have to
run the node chain twice. In the extra run for the visualization, an
All_Train_Splitter would be used prior to this node.
This is what this node will plot:
- In a position in the node chain where the current data object is a time
      series, it will plot the average of all training samples of the
current time series.
- If the current object is not a time series, this node will go back in
the data object's history until it finds a time series. This time series
will then be used.
- If a path to a features.pickle is passed using the load_feature_path
variable, then this features will be used for plotting.
- If no load_feature_path is set, this node will check if the current data
object has a data.predictor.features entry. This will be the case if the
previous node has been a classifier. If so, these features will be used.
- If features are found in neither of the aforementioned two locations, no
features will be plotted. The average time series however will still be
plotted.
**Parameters**
:load_feature_path:
Path to the stored pickle file containing the selected features.
So far, LibSVM and 1-Norm SVM nodes can deliver this output.
Defaults to 'None', which implies that no features are plotted.
The average time series are plotted anyway.
(*optional, default: None*)
:error_type:
        Selects which type of error is shown in the average time series plots:
:None: No errors
:'SampleStdDev': +- 1 Sample standard deviation.
This is, under Gaussian assumptions, the area, in which 68% of the
samples lie.
:'StdError': +- 1 Standard error of the mean.
This is the area in which, under Gaussian assumptions, the sample
mean will end up in 68% of all cases.
        If multiples of these quantities are desired, simply use them as a prefix
in the strings.
With Multiplier 2, the above percentages change to 95%
With Multiplier 3, the above percentages change to 99.7%
Here are examples for valid entries:
'2SampleStdDev', None, 'StdError', '2StdError', '1.7StdError'
(*optional, default: '2StdError'*)
:axflip:
If axflip is True, the y-axes of the averaged time series plots
        are reversed. This makes the plots look the way psychologists
        (and even some neuroscientists) are used to.
(*optional, default: False*)
:alternative_scaling:
If False, the values from the loaded feature file (i.e. the "w" in the
SVM notation) are directly used for both graphical feature
representation and rating of "feature importance". If True, instead
the product of these values and the difference of the averaged time
domain feature values of both classes is used: importance(i) = w(i) *
(avg_target(i) - avg_standard(i)) On the one hand, using the feature
averages implicitly assumes normally distributed features. On the
other hand, this computation takes into account the fact that
different features have different value ranges. The eventual
classification with SVMs is done by evaluating the
sum_i{ w(i) * feature(i) }.
In that sense, the here defined importance measures the average
contribution of a certain feature to the classification function.
As such, and that's the essential point, it makes the values
comparable.
(*optional, default: False*)
:physiological_arrangement:
If False all time series plots are arranged in a matrix of plots. If
set to True, the plots are arranged according to the arrangement of
the electrodes on the scalp. Obviously, this only makes sense if the
investigated time series are not spatially filtered. CSP pseudo
channels, e.g., can't be arranged on the scalp.
(*optional, default: False*)
:shrink_plots:
Defaults to False and is supposed to be set to True, whenever channels
from the 64 electrode cap are investigated jointly with electrodes
        from the 128 cap that do not appear on the 64 cap. Avoids overlapping of
the plots in physiological arrangement.
(*optional, default: False*)
:important_feature_thresh:
Gives a threshold below which features are not considered important.
Only important features will appear in the plots.
Defaults to 0, i.e. all non-zero features are important.
This parameter collides with percentage_of_features; the stricter
restriction applies.
(*optional, default: 0.0*)
:percentage_of_features:
Define the percentage of features to be drawn in the plots.
Defaults to 100, i.e. all features are to be used.
This parameter collides with important_feature_thresh; the stricter
restriction applies. Thus, even in the default case, most of the time
less than 100% of the features will be drawn due to the non-zero
condition of the important_feature_thresh parameter.
Note that the given percentage is in relation to the total number of
features; not in relation to the number of features a classifier has
used in some sense.
(*optional, default: 100*)
:emotiv:
        Use the emotiv parameter if the data was acquired with the emotiv EPOC
system. This will just change the position of text in the plots - it's
not visible otherwise.
(*optional, default: False*)
**Known Issues**
    The title of physiologically arranged time series plots vanishes if no
    frontal channels are plotted, because the plot gets trimmed and so is
    the title.
**Exemplary Call**
.. code-block:: yaml
-
node : AverageFeatureVis
parameters :
load_feature_path : "/path/to/my/features.pickle"
alternative_scaling : True
physiological_arrangement : True
axflip : True
shrink_plots : False
important_feature_thresh : 0.3
percentage_of_features : 20
error_type : "2SampleStdDev"
:Author: David Feess ([email protected])
:Created: 2010/02/10
:Reviewed: 2011/06/24
"""
def __init__(self,
load_feature_path='None',
axflip= False,
alternative_scaling=False,
physiological_arrangement=False,
shrink_plots=False,
important_feature_thresh=0.0,
percentage_of_features=100,
emotiv=False,
error_type='2StdError',
**kwargs):
# Must be set before constructor of superclass is set
self.trainable = True
super(AverageFeatureVisNode, self).__init__(store=True, **kwargs)
# try to read the file containing the feature information
feature_vector = None
try:
feature_file = open(load_feature_path, 'r')
feature_vector = cPickle.load(feature_file)
feature_vector = feature_vector[0] #[0]!!
feature_file.close()
except:
print "FeatureVis: No feature file to load."+\
" Will search in current data object."
self.set_permanent_attributes(
alternative_scaling=alternative_scaling,
physiological_arrangement=physiological_arrangement,
feature_vector=feature_vector,
important_feature_thresh=important_feature_thresh,
percentage_of_features=percentage_of_features,
shrink_plots=shrink_plots,
max_feature_val=None,
feature_time_series=None,
number_of_channels=None,
samples_per_window=None,
sample_data=None,
own_colormap=None,
error_type=error_type,
mean_time_series=dict(),
time_series_histo=dict(),
error=dict(),
mean_classification_target=None,
samples_per_condition=dict(),
normalizer=None,
trainable=self.trainable,
corr_important_feats=dict(),
corr_important_feat_names=None,
labeled_corr_matrix=dict(),
channel_names=None,
indexlist=None,
ts_plot=None,
histo_plot=None,
corr_plot=dict(),
feature_development_plot=dict(),
axflip=axflip,
emotiv=emotiv
)
def is_trainable(self):
""" Returns whether this node is trainable. """
return self.trainable
def is_supervised(self):
""" Returns whether this node requires supervised training """
return self.trainable
def _execute(self, data):
""" Nothing to be done here """
return data
def get_last_timeseries_from_history(self, data):
n = len(data.history)
for i in range(n):
if type(data.history[n-1-i]) == TimeSeries:
return data.history[n-1-i]
raise LookupError('FeatureVis found no TimeSeries object to plot.' +
' Add "keep_in_history : True" to the last node that produces' +
' time series objects in your node chain!')
def _train(self, data, label):
"""
Add the given data point along with its class label
to the training set, i.e. update 'mean' time series and append to
the complete data.
"""
        # Check whether the features still have to be looked up
# This is needed only once
if self.feature_vector == None and self.number_of_channels == None:
# if we have a prediction vector we are at a processing stage
# after a classifier. The used features can than be found in
# data.predictor.features.
try:
self.feature_vector = data.predictor.features[0] #[0]!!
print "FeatureVis: Found Features in current data object."
# If we find no data.predictor.features, simply go on without
except:
print "FeatureVis: Found no Features at all."
# If the current object is no time series, get the last time series
# from history. This will raise an exception if there is none.
if type(data) != TimeSeries:
data = self.get_last_timeseries_from_history(data)
# If this is the first data sample we obtain
if self.number_of_channels == None:
# Count the number of channels & samples per window
self.number_of_channels = data.shape[1]
self.sample_data = data
self.samples_per_window = data.shape[0]
self.channel_names = data.channel_names
# If we encounter this label for the first time
if label not in self.mean_time_series.keys():
# Create the class mean time series lazily
self.mean_time_series[label] = data
self.time_series_histo[label] = []
else:
# If label exists, just add data
self.mean_time_series[label] += data
self.time_series_histo[label].append(data)
# Count the number of samples per class
self.samples_per_condition[label] = \
self.samples_per_condition.get(label, 0) + 1
def _stop_training(self, debug=False):
"""
Finish the training, i.e. for the time series plots: take the
accumulated time series and divide by the number of samples per
condition.
        For the feature plots, generate the histogram, correlation, and
        feature development figures.
"""
# Compute avg
for label in self.mean_time_series.keys():
self.mean_time_series[label] /= self.samples_per_condition[label]
self.time_series_histo[label] = \
pylab.array(self.time_series_histo[label])
# Compute error of desired type - strip the numerals:
if self.error_type is not None:
if self.error_type.strip('0123456789.') == 'SampleStdDev':
self.error[label] = \
pylab.sqrt(pylab.var(self.time_series_histo[label],0))
elif self.error_type.strip('0123456789.') == 'StdError':
self.error[label] = \
pylab.sqrt(pylab.var(self.time_series_histo[label],0)) /\
pylab.sqrt(pylab.shape(self.time_series_histo[label])[0])
multiplier = float(''.join([nr for nr in self.error_type
if (nr.isdigit() or nr == ".")]))
self.error[label] = multiplier * self.error[label]
        # other plots only if features were passed
if (self.feature_vector != None):
self.feature_time_series = \
convert_feature_vector_to_time_series(self.feature_vector,
self.sample_data)
# in the alternative scaling space, the feature "importance" is
# determined by the feature values
# weighted by the expected difference in time series values
# between the two classes (difference of avg std and avg target)
            # The standard P3 and LRP cases are handled separately to make
# sure that the sign of the difference is consistent
if self.alternative_scaling:
if all(
[True if label_iter in ['Target', 'Standard'] else False
for label_iter in self.mean_time_series.keys()]):
self.feature_time_series*=(
self.mean_time_series['Target']-
self.mean_time_series['Standard'])
elif all(
[True if label_iter in ['LRP', 'NoLRP'] else False
for label_iter in self.mean_time_series.keys()]):
self.feature_time_series*=(
self.mean_time_series['LRP']-
self.mean_time_series['NoLRP'])
else:
self.feature_time_series*=(
self.mean_time_series[self.mean_time_series.keys()[0]]-
self.mean_time_series[self.mean_time_series.keys()[1]])
print "AverageFeatureVis (alternative_scaling): " +\
"Present classes don't match the standards " +\
"(Standard/Target or LRP/NoLRP). Used the difference "+\
"%s - %s" % (self.mean_time_series.keys()[0],
self.mean_time_series.keys()[1]) +" for computation "+\
"of the alternative scaling."
# greatest feature val that occures is used for the normalization
# of the color-representation of the feature values
self.max_feature_val = \
(abs(self.feature_time_series)).max(0).max(0)
self.normalizer = colors.Normalize(vmin=-self.max_feature_val,
vmax= self.max_feature_val)
cdict={ 'red':[(0.0, 1.0, 1.0),(0.5, 1.0, 1.0),(1.0, 0.0, 0.0)],
'green':[(0.0, 0.0, 0.0),(0.5, 1.0, 1.0),(1.0, 0.0, 0.0)],
'blue':[(0.0, 0.0, 0.0),(0.5, 1.0, 1.0),(1.0, 1.0, 1.0)]}
self.own_colormap = \
colors.LinearSegmentedColormap('owncm', cdict, N=256)
# sort the features with descending importance
self.indexlist=pylab.transpose(self.feature_time_series.nonzero())
indexorder = abs(self.feature_time_series
[abs(self.feature_time_series) >
self.important_feature_thresh]).argsort()
self.indexlist = self.indexlist[indexorder[-1::-1]] #reverse order
self.indexlist = map(list,self.indexlist[
:len(self.feature_vector)*self.percentage_of_features/100])
self.histo_plot = self._generate_histo_plot()
try:
# try to generate a plot of the feature crosscorrelation
# matrix. Might fail if the threshold is set such that no
# features are left.
for label in self.mean_time_series.keys():
self.labeled_corr_matrix[label] = \
self._generate_labeled_correlation_matrix(label)
self.corr_plot[label] = \
self._get_corr_plot(self.corr_important_feats[label],
label)
# if 2 class labels exist, also compute the difference in the
# cross correlation between the classes.
if len(self.corr_important_feats.keys()) == 2:
self.corr_plot['Diff'] = self._get_corr_plot((
self.corr_important_feats
[self.corr_important_feats.keys()[0]]
- self.corr_important_feats
[self.corr_important_feats.keys()[1]]),
self.corr_important_feats.keys()[0] + ' - ' + \
self.corr_important_feats.keys()[1])
except TypeError:
import warnings
warnings.warn("\n\nFeatureVis doesn't have enough important" +
" features left for correlation plots..."+
" Check threshold.\n")
# Compute avg time series plot anyway
self.ts_plot = self._generate_time_series_plot()
def store_state(self, result_dir, index=None):
""" Stores all generated plots in the given directory *result_dir* """
if self.store:
node_dir = os.path.join(result_dir, self.__class__.__name__)
if not index == None:
node_dir += "_%d" % int(index)
create_directory(node_dir)
if (self.ts_plot != None):
name = 'timeseries_sp%s.pdf' % self.current_split
self.ts_plot.savefig(os.path.join(node_dir, name),
bbox_inches="tight")
if (self.histo_plot != None):
name = 'histo_sp%s.pdf' % self.current_split
self.histo_plot.savefig(os.path.join(node_dir, name),
bbox_inches="tight")
for label in self.labeled_corr_matrix.keys():
name = 'Feature_Correlation_%s_sp%s.txt' % (label,
self.current_split)
pylab.savetxt(os.path.join(node_dir, name),
self.labeled_corr_matrix[label], fmt='%s',
delimiter=' ')
name = 'Feature_Development_%s_sp%s.pdf' % (label,
self.current_split)
self.feature_development_plot[label].savefig(
os.path.join(node_dir, name))
for label in self.corr_plot.keys():
name = 'Feature_Correlation_%s_sp%s.pdf' % (label,
self.current_split)
self.corr_plot[label].savefig(os.path.join(node_dir, name))
pylab.close("all")
def _generate_labeled_correlation_matrix(self, label):
""" Concatenates the feature names to the actual correlation matrices.
This is for better overview in stored txt files later on."""
labeled_corr_matrix = pylab.array([])
for i in pylab.array(self.corr_important_feats[label]):
if len(labeled_corr_matrix) == 0:
labeled_corr_matrix = [[('% .2f' % j).rjust(10) for j in i]]
else:
labeled_corr_matrix = pylab.vstack((labeled_corr_matrix,
[[('% .2f' % j).rjust(10) for j in i]]))
labeled_corr_matrix = pylab.c_[self.corr_important_feat_names,
labeled_corr_matrix]
labeled_corr_matrix = pylab.vstack((pylab.hstack((' ',
self.corr_important_feat_names)),
labeled_corr_matrix))
return labeled_corr_matrix
def _generate_time_series_plot(self):
""" This function generates the actual time series plot"""
# This string will show up as text in the plot and looks something
# like "Target: 123; Standard:634"
samples_per_condition_string = \
"; ".join([("%s: " + str(self.samples_per_condition[label]))
% label for label in self.mean_time_series.keys()])
figTS = pylab.figure()
# Compute number of rows and cols for subplot-arrangement:
# use 8 as upper limit for cols and compute rows accordingly
if self.number_of_channels <= 8: nr_of_cols = self.number_of_channels
else: nr_of_cols = 8
nr_of_rows = (self.number_of_channels - 1) / 8 + 1
# Set canvas size in inches. These values turned out fine, depending
        # on [physiological_arrangement] and [shrink_plots]
if not self.physiological_arrangement:
figTS.set_size_inches((5 * nr_of_cols, 3 * nr_of_rows))
ec_2d = None
else:
if not self.shrink_plots:
figTS.set_size_inches((3*11.7, 3*8.3))
else:
figTS.set_size_inches((4*11.7, 4*8.3))
ec = self.getMetadata("electrode_coordinates")
if ec is None:
ec = StreamDataset.ec
ec_2d = StreamDataset.project2d(ec)
# plot everything channel-wise
for i_chan in range(self.number_of_channels):
figTS.add_subplot(nr_of_rows, nr_of_cols, i_chan + 1)
# actual plotting of the data. This can always be done
for tslabel in self.mean_time_series.keys():
tmp_plot=pylab.plot(self.mean_time_series[tslabel][:, i_chan],
label=tslabel)
cur_color = tmp_plot[0].get_color()
if self.error_type != None:
for sample in range(self.samples_per_window):
current_error = self.error[label][sample, i_chan]
pylab.bar(sample-.35, 2*current_error, width=.7,
bottom=self.mean_time_series[tslabel][sample, i_chan]
-current_error,
color=cur_color, ec=None, alpha=.3)
# plotting of features; only if features present
if (self.feature_time_series != None):
# plot those nice grey circles
pylab.plot(self.feature_time_series[:, i_chan],
'o', color='0.5', label='Feature', alpha=0.5)
for sample in range(self.samples_per_window):
if [sample, i_chan] in self.indexlist:
# write down value...
pylab.text(sample,
self.feature_time_series[sample, i_chan],
'%.2f' %
self.feature_time_series[sample, i_chan],
ha='center', color='black',
size='xx-small')
# ...compute the corresponding color-representation...
marker_color = \
self.own_colormap(self.normalizer(\
self.feature_time_series[sample, i_chan]))
# ...and draw vertical boxes at the feature's position
pylab.axvspan(sample - .25, sample + .25,
color=marker_color,
ec=None, alpha=.8)
            # more formatting and rearrangement in case of [physiological_arrangement]
self._format_subplots('mean time series', i_chan,
samples_per_condition_string, ec_2d)
        # in case of [physiological_arrangement], write the figure title only once
# and large in the upper left corner of the plot. this fails whenever
# there are no more channels in that area, as the plot gets cropped
if self.physiological_arrangement:
h, l = pylab.gca().get_legend_handles_labels()
prop = matplotlib.font_manager.FontProperties(size='xx-large')
figTS.legend(h, l, prop=prop, loc=1)
if not self.emotiv:
text_x = .1
text_y = .92
else:
text_x = .4
text_y = .4
if self.shrink_plots: text_y = 1.2
figTS.text(text_x, text_y, 'Channel-wise mean time series\n' +
samples_per_condition_string,
ha='center', color='black', size=32)
return figTS
def _format_subplots(self, type, i_chan, samples_per_condition_string, ec=None):
""" Some time series plot formatting. Mainly writes the channel names
into the axes, sets titles and rearranges the axes for
        physiological_arrangement. Also flips axes if desired by setting
axflip = True """
# Plot zero line into every single subplot
pylab.plot(range(1, self.samples_per_window + 1),
pylab.zeros(self.samples_per_window), 'k--')
# current axis limits:
pylab.gca().set_xlim(xmin=0 - .5, xmax=self.samples_per_window + .5)
if self.axflip:
tempax = pylab.gca()
# reverse ylim
tempax.set_ylim(tempax.get_ylim()[::-1])
if not self.physiological_arrangement:
# every subplot gets a title
pylab.title('Channel-wise ' + type + ' - ' +
samples_per_condition_string,
ha='center', color='black', size='x-small')
# and a small legend
prop = matplotlib.font_manager.FontProperties(size='x-small')
pylab.legend(prop=prop)
else:
# do the physiological arrangement
x, y = ec[self.channel_names[i_chan]]
w = .07
h = .065
if self.shrink_plots:
w *= 1.2
h *= 0.9
x *= 4.0/3.0
y *= 4.0/3.0
pylab.gca().set_position([(x + 110) / 220, (y + 110) / 220, w, h])
# get current axis limits...
cal = pylab.gca().axis()
# ... and place channel name at a nice upper left position
pylab.text((.85 * cal[0] + .15 * cal[1]), (.8 * cal[3] + .2 * cal[2]),
self.channel_names[i_chan], ha='center', color='black',
size='xx-large')
def _generate_histo_plot(self):
""" This function generates the actual histogram plot"""
fighist = pylab.figure()
nr_of_feats = len(self.indexlist)
# again, number of subplot columns is 8 at most while
# using as many rows as necessary
if nr_of_feats <= 8: nr_of_cols = nr_of_feats
else: nr_of_cols = 8
nr_of_rows = (len(self.indexlist) - 1) / 8 + 1
fighist.set_size_inches((5 * nr_of_cols, 3 * nr_of_rows))
important_features = dict()
important_feature_names = pylab.array([])
itercount = 1
for feat_index in self.indexlist:
# backgroundcolor for the feature importance text
bg_color = self.own_colormap(self.normalizer(\
self.feature_time_series[tuple(feat_index)]))
fighist.add_subplot(nr_of_rows, nr_of_cols, itercount)
itercount += 1
# plot the actual histogram
pylab.hist([self.time_series_histo[label]
[:, feat_index[0], feat_index[1]]
for label in self.mean_time_series.keys()],
bins=20, normed=True , histtype='step')
# write feature importance as fig.text
cal = pylab.gca().axis()
pylab.text((.23 * cal[0] + .77 * cal[1]),
(.8 * cal[3] + .2 * cal[2]), '%.2f' %
self.feature_time_series[feat_index[0], feat_index[1]],
fontsize='xx-large',
bbox=dict(fc=bg_color, ec=bg_color, alpha=0.6, pad=14))
# Title uses feature name
pylab.title('Channel %s at %dms' %
(self.channel_names[feat_index[1]],
float(feat_index[0]) /
self.sample_data.sampling_frequency * 1000),
fontsize='x-large')
# initialize, if no important features known yet
if important_features.values() == []:
for label in self.mean_time_series.keys():
important_features[label] = \
pylab.array(self.time_series_histo[label][:,
feat_index[0], feat_index[1]])
# stack current important feature with previous
else:
for label in self.mean_time_series.keys():
important_features[label] = pylab.vstack(
(important_features[label],
pylab.array(self.time_series_histo[label]
[:, feat_index[0], feat_index[1]])))
# memorize feature name
important_feature_names = \
pylab.append(important_feature_names, \
[('%s' % self.channel_names[feat_index[1]]).ljust(4, '_')\
+ ('%dms' % (float(feat_index[0]) / \
self.sample_data.sampling_frequency * 1000)).rjust(6, '_')])
self.corr_important_feat_names = important_feature_names
for label in important_features.keys():
self.corr_important_feats[label] = \
pylab.corrcoef(important_features[label])
# Draw the "feature development" plots of the important features
self._generate_feature_development_plots(important_features)
return fighist
def _generate_feature_development_plots(self, important_features):
""" This function generates the actual histogram plot"""
# Everything is done class-wise
for label in important_features.keys():
# Axis limits are determined by the global maxima
(minVal, maxVal) = (important_features[label].min(0).min(0),
important_features[label].max(0).max(0))
nr_chans = pylab.shape(important_features[label])[0]
myFig = pylab.figure()
myFig.set_size_inches((40,nr_chans))
for i_chan in range(nr_chans):
ax = myFig.add_subplot(nr_chans, 1, i_chan+1)
# cycle line colors
if (pylab.mod(i_chan,2) == 0): myCol = '#000080'
else: myCol = '#003EFF'
# plot features and black zero-line
pylab.plot(important_features[label][i_chan,:],color=myCol)
pylab.plot(range(len(important_features[label][i_chan,:])),
pylab.zeros(len(important_features[label][i_chan,:])),
'k--')
pylab.ylim((minVal,maxVal))
xmax = pylab.shape(important_features[label])[1]
pylab.xlim((0,xmax))
# draw vertical dashed line every 20 epochs
for vertical_line_position in range(0,xmax+1,20):
pylab.axvline(x=vertical_line_position,
ymin=0, ymax=1, color='k', linestyle='--')
# write title above uppermost subplot
if i_chan+1 == 1:
pylab.title('Feature development: Amplitudes of %s Epochs'
% label, fontsize=40)
# adjust the axes, i.e. remove upper and right,
# shift the left to avoid overlaps,
# and lower axis only @ bottom subplot
if i_chan+1 < nr_chans:
self._adjust_spines(ax,['left'],i_chan)
else:
self._adjust_spines(ax,['left', 'bottom'],i_chan)
pylab.xlabel('Number of Epoch', fontsize=36)
# Write feature name next to the axis
pylab.ylabel(self.corr_important_feat_names[i_chan],
fontsize=20, rotation='horizontal')
# remove whitespace between subplots etc.
myFig.subplots_adjust(bottom=0.03,left=0.08,right=0.97,
top=0.94,wspace=0,hspace=0)
self.feature_development_plot[label] = myFig
def _get_corr_plot(self, corr_matrix, label):
""" Plot the current correlation matrix as filled contour plot
and return figure instance. """
figCorrelation = pylab.figure()
pylab.imshow(corr_matrix, interpolation='nearest', vmin=-1, vmax=1)
pylab.gca().set_ylim((len(corr_matrix) - 0.5, -0.5))
pylab.gca().set_yticks(range(len(corr_matrix)))
pylab.gca().set_yticklabels(self.corr_important_feat_names,
size='xx-small')
pylab.gca().set_xticks(range(len(corr_matrix)))
pylab.gca().set_xticklabels(self.corr_important_feat_names,
rotation='vertical', size='xx-small')
pylab.gca().set_title(label)
pylab.colorbar()
return figCorrelation
# def _get_electrode_coordinates(self, key):
# """ Convert the polar coordinates of the electrode positions
# to cartesian coordinates for the positioning of the physiologically
# arranged plots. As the position specification also requires a height
# and width, these values are also passed. Height and width are tuned
# manually such that the resulting plots look nice. """
# # coordinate transformation
# x = (self.ec[key][0] *
# pylab.cos(self.ec[key][1] / 180 * pylab.pi) + 110) / 220
# y = (self.ec[key][0] *
# pylab.sin(self.ec[key][1] / 180 * pylab.pi) + 110) / 220
# w = .07
# h = .065
# if self.shrink_plots:
# w *= 1.2
# h *= 0.9
# x *= 4.0/3.0
# y *= 4.0/3.0
#
# return [x, y, w, h]
def _adjust_spines(self,ax,spines,i_chan):
""" Essentially, removes most of the axes in the feature development
plots. Also produces the alternating shift of the left axes. """
for loc, spine in ax.spines.iteritems():
if loc in spines:
if ((loc=='left') and (pylab.mod(i_chan,2) == 0)):
spine.set_position(('outward',5))
else:
spine.set_position(('outward',30)) # outward by 10 points
else:
spine.set_color('none') # don't draw spine
ax.yaxis.set_ticks_position('left')
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(30)
else: ax.xaxis.set_ticks([]) # no xaxis ticks
|
gpl-3.0
|
Obus/scikit-learn
|
examples/neighbors/plot_approximate_nearest_neighbors_scalability.py
|
225
|
5719
|
"""
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index size (full scan). LSHForest
indexes have a sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher-dimensional datasets tend to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
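# Hedged note (not part of the original example): precision and query time can
# be traded off by raising n_candidates or n_estimators in the LSHForest
# constructor above; both increase query cost but improve precision@10.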
|
bsd-3-clause
|
karoraw1/GLM_Wrapper
|
bin/GEO_Database.py
|
1
|
8629
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 29 15:36:20 2016
@author: login
"""
from LakeModel import make_dir
import os, mmap, time
import urllib2
import pandas as pd
import numpy as np
from multiprocessing import Pool
import shutil
from contextlib import closing
def is_archive(extension):
if extension.upper() == '.HDF':
return True
elif extension.upper() == '.HE5':
return True
else:
return False
def is_xml(extension):
if extension.upper() == '.XML':
return True
else:
return False
def maybe_download(url):
"""Download a file if not present, and make sure it's the right size."""
filename = os.path.basename(url)
if filename[-3:] == 'xml' or filename[-3:] == 'he5':
if not os.path.exists(filename):
start_dl = time.time()
with closing(urllib2.urlopen(url)) as r:
with open(filename, 'wb') as f:
shutil.copyfileobj(r, f)
print time.time()-start_dl
return filename
else:
return "AlreadyDownloaded"
else:
print "unexpected file name skipped"
pass
return "NotAFile"
def textDict(path, newDict):
"pass in the path to a text file and a dictionary to-be-filled with lines"
f = open(path, 'r')
for idx, line in enumerate(f):
newDict[idx] = line.strip()
f.close()
return newDict
def parseXML(xmlfile):
tags = ['RangeDateTime', 'GranuleID']
f = open(xmlfile)
s = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
## Initialize variable to return with observed file name
head1 = s.find('RangeDateTime')
head2 = s.find('GranuleID')
mdata = [xmlfile]
if 'RangeDateTime' in tags:
## Grab time & date info & package into tuple
s.seek(head1)
s.readline()
start_date = s.readline()[24:34]
start_time = s.readline()[24:32]
end_date = s.readline()[21:31]
end_time = s.readline()[21:29]
start_string = start_date + " " + start_time
end_string = end_date + " " + end_time
mdata.append(start_string)
mdata.append(end_string)
else:
mdata.append("")
mdata.append("")
if 'GranuleID' in tags:
## Grab file name and append to list
s.seek(head2+10)
mdata.append(s.readline()[:-13])
else:
mdata.append("")
s.close()
f.close()
return mdata
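# Illustrative note (added; not in the original source): parseXML() returns a
# flat list shaped like [xml_path, start_string, end_string, granule_id],
# which write_metadata_table() below packs into the 'Path', 'Start', 'End'
# and 'file' columns. A minimal sketch, assuming 'sample.xml' is a local
# GEODISC metadata file:
#
# row = parseXML('sample.xml')
# print row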
class GEO_Database(object):
def __init__(self, name, TestBool=True, BuildBool=True):
self.meta_df = None
print "Is this a test run? ", TestBool
print "Is this a full build? ", BuildBool
self.test = TestBool
self.build = BuildBool
self.meta_db = {}
self.data_db = {}
self.name = os.path.join(os.path.split(os.getcwd())[0], 'weatherData',
name)
make_dir(self.name)
self.mdata_csv = os.path.join(self.name, "meta_table.csv")
if os.path.exists(self.mdata_csv):
print "There appears to be an existing metadata table in this location"
print "To use this intermediate build, use read_metadata_table()`"
#get the test path
self.test_path = os.path.join(os.path.split(os.getcwd())[0],
'test_files')
def read_metadata_URL_list(self, meta_text = None):
# `meta_text` can be modified by the user to a distinct list of meta
# data files posted at GEODISC, or another ftp server. The two samples
# differ considerably in size, to speed up testing.
if meta_text is None:
if self.test is False:
self.meta_text = os.path.join(self.test_path,
'GEO_MetaDataOnly.txt')
elif self.test is True:
self.meta_text = os.path.join(self.test_path,
'test_xml_list120.txt')
else:
self.meta_text = meta_text
self.meta_db = {}
self.meta_db = textDict(self.meta_text, self.meta_db)
def testsplit(self):
self.read_metadata_URL_list()
self.keepers = self.meta_db.values()
mixed_urls = self.all_urls.items()
self.keeper_files = set([os.path.basename(i)[:-4] for i in self.keepers])
self.mixed_files = {os.path.basename(v): (i,v) for i, v in mixed_urls}
for kf in self.keeper_files:
l, u = self.mixed_files[kf]
self.data_db[l] = u
def read_combined_URL_list(self, combo_text=None):
if combo_text is None:
self.combo_text = os.path.join(self.test_path,
'GEO_DataAndMetaData.txt')
else:
self.combo_text = combo_text
self.all_urls = {}
self.all_urls = textDict(self.combo_text, self.all_urls)
old_mdata = self.meta_db.values()
if self.test is True:
self.testsplit()
else:
for k,v in self.all_urls.items():
if is_archive(v[-4:]):
self.data_db[k] = v
                elif is_xml(v[-4:]) and v not in old_mdata:
self.meta_db[k] = v
def read_metadata_table(self, meta_csv_path):
if self.meta_df is None:
self.meta_df = pd.read_csv(meta_csv_path, index_col=0)
else:
print "This one is created already"
def download_data(self, type):
if type == 'metadata':
samples = self.meta_db.values()
print "%r metadata files required" % len(samples)
fns = {os.path.basename(i):i for i in samples}
for fn in fns.keys():
dest = os.path.join(self.name, fn)
if os.path.exists(dest):
samples.remove(fns[fn])
procs = 10
print "%r metadata files not located" % len(samples)
# If no previous build detected, download proceeds
print "Downloading began at ", time.ctime()
time_m_dl = time.time()
pool = Pool(processes=procs)
self.results = pool.map(maybe_download, samples)
print "Downloading ended after %r s." % (time.time()-time_m_dl)
s_right = [os.path.join(self.name, os.path.basename(i)) for i in samples]
self.s_wrong = [os.path.join(os.getcwd(), os.path.basename(i)) for i in samples]
for idx, s in enumerate(self.s_wrong):
if os.path.exists(s):
shutil.move(s, s_right[idx])
self.meta_db_files = s_right
def check_table_to_database(self, table_choice):
## Remove records without a file name
clean_clouds = cloud_df[cloud_df.hdf5.notnull()]
## Verify that metadata records are matched to existing archive URLs
for f_n in list(clean_clouds.hdf5):
if f_n in all_files.keys():
data_urls.append(all_files[f_n])
else:
bad_urls.append(all_files[f_n])
## If any metadata records are not located, this assertion fails
archives = len(data_urls)
try:
assert archives == clean_clouds.shape[0]
except AssertionError:
print archives, " archives"
print clean_clouds.shape[0], "metadata recs"
assert archives == clean_clouds.shape[0]
def write_metadata_table(self, tags = ['RangeDateTime', 'GranuleID']):
if len(self.meta_db_files) == 0:
for f in self.meta_db.values():
shouldbe = os.path.join(self.name, os.path.basename(f))
if os.path.exists(shouldbe):
self.meta_db_files.append(shouldbe)
pool = Pool(processes=len(self.meta_db_files))
self.timeIDlist = pool.map(parseXML, self.meta_db_files)
row_n = len(self.timeIDlist)
col_n = len(self.timeIDlist[0])
cols = ['Path', 'Start', 'End', 'file']
cloud_data = np.array(self.timeIDlist).reshape((row_n,col_n))
self.meta_df = pd.DataFrame(index = self.meta_db_files, columns = cols,
data = cloud_data)
self.out_csv = os.path.join(self.name, "meta_db.csv")
self.meta_df.to_csv(self.out_csv)
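# Illustrative usage sketch (added as a commented example, not part of the
# original module; the database name and the order of the calls follow the
# defaults above but are otherwise an assumption):
#
# db = GEO_Database('cloud_cover_db', TestBool=True, BuildBool=True)
# db.read_metadata_URL_list()      # fill meta_db from the bundled URL list
# db.read_combined_URL_list()      # split archive vs. metadata (xml) URLs
# db.download_data('metadata')     # fetch the xml files in parallel
# db.write_metadata_table()        # parse them and write the csv table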
|
mit
|
hongguangguo/shogun
|
examples/undocumented/python_modular/graphical/metric_lmnn_objective.py
|
26
|
2350
|
#!/usr/bin/env python
def load_compressed_features(fname_features):
try:
import gzip
import numpy
except ImportError:
print 'Error importing gzip and/or numpy modules. Please, verify their installation.'
import sys
sys.exit(0)
# load features from a gz compressed file
file_features = gzip.GzipFile(fname_features)
str_features = file_features.read()
file_features.close()
strlist_features = str_features.split('\n')[:-1] # all but last because the last line also has \n
# the number of lines in the file is the number of vectors
num_vectors = len(strlist_features)
# the number of elements in a line is the number of features
num_features = len(strlist_features[0].split())
# memory pre-allocation for the feature matrix
fm = numpy.zeros((num_vectors, num_features))
# fill in feature matrix
for i in xrange(num_vectors):
try:
fm[i,:] = map(numpy.float64, strlist_features[i].split())
		except ValueError:
print 'All the vectors must have the same number of features.'
import sys
sys.exit(0)
return fm
def metric_lmnn_statistics(k=3, fname_features='../../data/fm_train_multiclass_digits.dat.gz', fname_labels='../../data/label_train_multiclass_digits.dat'):
try:
from modshogun import LMNN, CSVFile, RealFeatures, MulticlassLabels, MSG_DEBUG
import matplotlib.pyplot as pyplot
except ImportError:
print 'Error importing modshogun or other required modules. Please, verify their installation.'
return
features = RealFeatures(load_compressed_features(fname_features).T)
labels = MulticlassLabels(CSVFile(fname_labels))
# print 'number of examples = %d' % features.get_num_vectors()
# print 'number of features = %d' % features.get_num_features()
assert(features.get_num_vectors() == labels.get_num_labels())
# train LMNN
lmnn = LMNN(features, labels, k)
lmnn.set_correction(100)
# lmnn.io.set_loglevel(MSG_DEBUG)
print 'Training LMNN, this will take about two minutes...'
lmnn.train()
print 'Training done!'
# plot objective obtained during training
statistics = lmnn.get_statistics()
pyplot.plot(statistics.obj.get())
pyplot.grid(True)
pyplot.xlabel('Iterations')
pyplot.ylabel('LMNN objective')
pyplot.title('LMNN objective during training for the multiclass digits data set')
pyplot.show()
if __name__=='__main__':
print('LMNN objective')
metric_lmnn_statistics()
|
gpl-3.0
|
PrashntS/scikit-learn
|
examples/cross_decomposition/plot_compare_cross_decomposition.py
|
128
|
4761
|
"""
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n / 2]
Y_train = Y[:n / 2]
X_test = X[n / 2:]
Y_test = Y[n / 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
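# Quick numeric check of the CCA scores on the held-out data (added
# illustration, not part of the original example):
print("CCA comp. 1, test corr = %.2f"
      % np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])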
|
bsd-3-clause
|
qilicun/python
|
python2/ecl_sum.py
|
1
|
46554
|
# Copyright (C) 2011 Statoil ASA, Norway.
#
# The file 'ecl_sum.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module for loading and querying summary data.
The low-level organisation of summary data is extensively documented
in the C source files ecl_sum.c, ecl_smspec.c and ecl_sum_data in the
libecl/src directory.
"""
# Observe that there is some convention conflict with the C code
# regarding order of arguments: The C code generally takes the time
# index as the first argument and the key/key_index as second
# argument. In the python code this order has been reversed.
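#
# For example (illustrative note, grounded in the wrappers defined further
# down in this file): the Python method EclSum.iiget( key_index , time_index )
# forwards to the C layer as cfunc.iiget( self , time_index , key_index ),
# i.e. the two indices are swapped back into the C ordering.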
import libecl
from ert.cwrap.cwrap import *
from ert.cwrap.cclass import CClass
from ert.util.stringlist import StringList
from ert.util.ctime import ctime
import numpy
#import ert.ecl_plot.sum_plot as sum_plot
# The date2num function is a verbatim copy of the _to_ordinalf()
# function from the matplotlib.dates module. Inserted here only to
# avoid importing the full matplotlib library. The date2num
# implementation could be replaced with:
#
# from matplotlib.dates import date2num
HOURS_PER_DAY = 24.0
MINUTES_PER_DAY = 60*HOURS_PER_DAY
SECONDS_PER_DAY = 60*MINUTES_PER_DAY
MUSECONDS_PER_DAY = 1e6*SECONDS_PER_DAY
def date2num( dt ):
"""
Convert a python datetime instance to UTC float days.
Convert datetime to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds, return value
is a float. The function is a verbatim copy of the _to_ordinalf()
function from the matplotlib.dates module.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if hasattr(dt, 'hour'):
base += (dt.hour/HOURS_PER_DAY +
dt.minute/MINUTES_PER_DAY +
dt.second/SECONDS_PER_DAY +
dt.microsecond/MUSECONDS_PER_DAY )
return base
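# Minimal illustration of date2num() (added comment, not in the original
# source):
#
# import datetime
# date2num( datetime.datetime( 2011 , 1 , 1 , 12 , 0 , 0 ) )
# # -> 734138.5 : the proleptic Gregorian ordinal of 2011-01-01 plus the
# # fraction of the day (12:00 -> 0.5), matching matplotlib.dates.date2num.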
class EclSumNode:
def __init__(self , mini_step , report_step , days , date , mpl_date , value):
"""
EclSumNode is a 'struct' with a summary value and time.
EclSumNode - a small 'struct' with a summary value and time in
several formats. When iterating over a EclSumVector instance
you will get EclSumNode instances. The content of the
EclSumNode type is stored as plain attributes:
value : The actual value
report_step : The report step
mini_step : The ministep
days : Days since simulation start
date : The simulation date
mpl_date : A date format suitable for matplotlib
"""
self.value = value
self.report_step = report_step
self.mini_step = mini_step
self.days = days
self.date = date
self.mpl_date = mpl_date
def __str__(self):
return "<EclSumNode: days:%g value:%g>" % ( self.days , self.value )
class EclSumVector:
def __init__(self , parent , key , report_only):
"""
A summary vector with a vector of values and time.
        A summary vector contains the full time history of one
key, along with the corresponding time vectors in several
different time formats. Depending on the report_only argument
the data vectors in the EclSumVector can either contain all
the time values, or only those corresponding to report_steps.
The EclSumVector contains a reference to the parent EclSum
structure and this is used to implement several of the
        properties and methods of the object; the EclSumVector
        instances should therefore only be instantiated through the
EclSum.get_vector() method, and not manually with the
EclSumVector() constructor.
"""
self.parent = parent
self.key = key
self.report_only = report_only
self.__dates = parent.get_dates( report_only )
self.__days = parent.get_days( report_only )
self.__mpl_dates = parent.get_mpl_dates( report_only )
self.__mini_step = parent.get_mini_step( report_only )
self.__report_step = parent.get_report_step( report_only )
self.__values = None
def __str__(self):
return "<Summary vector: %s>" % self.key
@property
def unit( self ):
"""
The unit of this vector.
"""
return self.parent.get_unit( self.key )
def assert_values( self ):
"""
This function will load and internalize all the values.
"""
        if self.__values is None:
self.__values = self.parent.get_values( self.key , self.report_only )
@property
def values( self ):
"""
All the summary values of the vector, as a numpy vector.
"""
self.assert_values( )
return self.__values
@property
def dates( self ):
"""
All the dates of the vector, list of datetime() instances.
"""
return self.__dates
@property
def days( self ):
"""
The time in days as a numpy vector.
In the case of lab unit this will be hours.
"""
return self.__days
@property
def mpl_dates( self ):
"""
All the dates as numpy vector of dates in matplotlib format.
"""
return self.__mpl_dates
@property
def mini_step( self ):
"""
All the ministeps of the vector.
        A ministep is the ECLIPSE notion of a timestep. The ministeps
are numbered sequentially starting at zero; if you have loaded
the entire simulation the ministep number will correspond to
the natural indexing. The length of each ministep is
determined by the convergence properties of the ECLIPSE
simulation.
"""
return self.__mini_step
@property
def report_step( self ):
"""
All the report_step of the vector.
"""
return self.__report_step
def __iget__( self , index ):
"""
Will return an EclSumNode for element @index; should be called
through the [] operator, otherwise you can come across
        uninitialized data.
"""
return EclSumNode( self.__mini_step[index] ,
self.__report_step[index] ,
self.__days[index] ,
self.__dates[index] ,
self.__mpl_dates[index] ,
self.__values[index])
def __len__(self):
"""
The length of the vector - used for the len() builtin.
"""
return len(self.__days)
def __getitem__(self , index):
"""
Implements the [] operator.
Will return EclSumNode instance according to @index. The index
value will be interpreted as in a normal python [] lookup,
i.e. negative values will be interpreted as starting from the
right and also slice notation is allowed[*].
[*] Observe that in the case of slices the return value will
not be a proper EclSumVector instance, but rather a normal
Python list of EclSumNode instances.
"""
self.assert_values( )
length = len( self.values )
if isinstance( index, int):
if index < 0:
index += len(self.__values)
            if index < 0 or index >= length:
raise KeyError("Invalid index:%d out of range [0:%d)" % ( index , length))
else:
return self.__iget__( index )
elif isinstance( index , slice ):
# Observe that the slice based lookup does __not__ return
# a proper EclSumVector instance; it will merely return
# a simple Python list with EclSumNode instances.
(start , stop , step) = index.indices( length )
index = start
sub_vector = []
while index < stop:
sub_vector.append( self.__iget__(index) )
index += step
return sub_vector
raise KeyError("Invalid index:%s - must have integer or slice." % index)
@property
def first( self ):
"""
Will return the first EclSumNode in this vector.
"""
self.assert_values( )
return self.__iget__( 0 )
@property
def last( self ):
"""
Will return the last EclSumNode in this vector.
"""
self.assert_values( )
index = len(self.__values) - 1
return self.__iget__( index )
@property
def last_value( self ):
"""
Will return the last value in this vector.
"""
self.assert_values( )
index = len(self.__values) - 1
return self.__iget__( index ).value
def get_interp( self , days = None , date = None):
"""
Will lookup value interpolated to @days or @date.
The function requires one, and only one, time indicator in
terms of @days or @date. If the @date variable is given that
should be Python datetime instance.
vec = sum["WWCT:A-3"]
vec.get_interp( days = 100 )
vec.get_interp( date = datetime.date( year , month , day ))
This function will crash and burn if the time arguments are
invalid; if in doubt you should check first.
"""
return self.parent.get_interp( self.key , days , date )
def get_interp_vector( self , days_list = None , date_list = None):
"""
Will return Python list of interpolated values.
See get_interp() for further details.
"""
return self.parent.get_interp_vector( self.key , days_list , date_list )
def get_from_report( self , report_step ):
"""
Will lookup the value based on @report_step.
"""
return self.parent.get_from_report( self.key , report_step )
#################################################################
def first_gt_index( self , limit ):
"""
Locates first index where the value is above @limit.
Observe that this method will raise an exception if it is
called from a vector instance with report_only = True.
"""
if not self.report_only:
key_index = cfunc.get_general_var_index( self.parent , self.key )
time_index = cfunc.get_first_gt( self.parent , key_index , limit )
return time_index
else:
raise Exception("Sorry - first_gt_index() can not be called for vectors with report_only=True")
def first_gt( self , limit ):
"""
Locate the first EclSumNode where value is above @limit.
vec = sum["WWCT:A-3"]
w = vec.first_gt( 0.50 )
print "Water cut above 0.50 in well A-3 at: %s" % w.date
Uses first_gt_index() internally and can not be called for
vectors with report_only = True.
"""
time_index = self.first_gt_index( limit )
print time_index
if time_index >= 0:
return self.__iget__( time_index )
else:
return None
def first_lt_index( self , limit ):
"""
Locates first index where the value is below @limit.
See first_gt_index() for further details.
"""
if not self.report_only:
key_index = cfunc.get_general_var_index( self.parent , self.key )
time_index = cfunc.get_first_lt( self.parent , key_index , limit )
return time_index
else:
raise Exception("Sorry - first_lt_index() can not be called for vectors with report_only=True")
def first_lt( self , limit ):
"""
Locates first element where the value is below @limit.
See first_gt() for further details.
"""
time_index = self.first_lt_index( limit )
if time_index >= 0:
return self.__iget__( time_index )
else:
return None
#def plot(self):
# sum_plot.plot_vector( self )
#################################################################
class EclSMSPECNode( CClass ):
"""
Small class with some meta information about a summary variable.
The summary variables have different attributes, like if they
represent a total quantity, a rate or a historical quantity. These
quantities, in addition to the underlying values like WGNAMES,
    KEYWORD and NUMS taken from the SMSPEC file are stored in this
structure.
"""
def __new__(cls , c_ptr , parent):
if c_ptr:
obj = object.__new__( cls )
obj.init_cref( c_ptr , parent )
return obj
else:
return None
@property
def is_total(self):
"""
Will check if the node corresponds to a total quantity.
The question of whether a variable corresponds to a 'total'
quantity or not can be interesting for e.g. interpolation
purposes. The actual question whether a quantity is total or
not is based on a hardcoded list in smspec_node_set_flags() in
smspec_node.c; this list again is based on the tables 2.7 -
2.11 in the ECLIPSE fileformat documentation.
"""
return cfunc.node_is_total( self )
@property
def is_rate(self):
"""
Will check if the variable in question is a rate variable.
        The concept of a rate variable is important (internally) when
        interpolating values to arbitrary times.
"""
return cfunc.node_is_rate( self )
@property
def is_historical(self):
"""
Checks if the key corresponds to a historical variable.
The check is only based on the last character; all variables
ending with 'H' are considered historical.
"""
return cfunc.node_is_historical( self )
@property
def unit(self):
"""
Returns the unit of this node as a string.
"""
return cfunc.node_unit( self )
@property
def wgname(self):
"""
Returns the WGNAME property for this node.
Many variables do not have the WGNAME property, i.e. the field
related variables like FOPT and the block properties like
BPR:10,10,10. For these variables the function will return
None, and not the ECLIPSE dummy value: ":+:+:+:+".
"""
return cfunc.node_wgname(self)
@property
def keyword(self):
"""
Returns the KEYWORD property for this node.
The KEYWORD property is the main classification property in
the ECLIPSE SMSPEC file. The properties of a variable can be
        read from the KEYWORD value; see table 3.4 in the ECLIPSE file
format reference manual.
"""
return cfunc.node_keyword( self )
@property
def num(self):
"""
Returns the NUMS value for this keyword; or None.
Many of the summary keywords have an integer stored in the
vector NUMS as an attribute, i.e. the block properties have
the global index of the cell in the nums vector. If the
variable in question makes use of the NUMS value this property
will return the value, otherwise it will return None:
sum.smspec_node("FOPT").num => None
sum.smspec_node("BPR:1000").num => 1000
"""
if cfunc.node_need_num( self ):
return cfunc.node_num(self)
else:
return None
class EclSum( CClass ):
def __new__( cls , load_case , join_string = ":" , include_restart = True):
"""
Loads a new EclSum instance with summary data.
Loads a new summary results from the ECLIPSE case given by
argument @load_case; @load_case should be the basename of the ECLIPSE
simulation you want to load. @load_case can contain a leading path
component, and also an extension - the latter will be ignored.
The @join_string is the string used when combining elements
        from the WGNAMES, KEYWORDS and NUMS vectors into a composite
key; with @join_string == ":" the water cut in well OP_1 will
be available as "WWCT:OP_1".
If the @include_restart parameter is set to true the summary
loader will, in the case of a restarted ECLIPSE simulation,
try to load summary results also from the restarted case.
"""
c_ptr = cfunc.fread_alloc( load_case , join_string , include_restart)
if c_ptr:
obj = object.__new__( cls )
obj.init_cobj( c_ptr , cfunc.free )
return obj
else:
return None
def __init__(self , load_case , join_string = ":" ,include_restart = True , c_ptr = None):
"""
Initialize a new EclSum instance.
See __new__() for further documentation.
"""
self.load_case = load_case
self.join_string = join_string
self.include_restart = include_restart
# Initializing the time vectors
length = self.length
self.__dates = [ 0 ] * length
self.__report_step = numpy.zeros( length , dtype = numpy.int32)
self.__mini_step = numpy.zeros( length , dtype = numpy.int32)
self.__days = numpy.zeros( length )
self.__mpl_dates = numpy.zeros( length )
for i in range( length ):
self.__days[i] = cfunc.iget_sim_days( self , i )
self.__dates[i] = cfunc.iget_sim_time( self , i).datetime()
self.__report_step[i] = cfunc.iget_report_step( self , i )
self.__mini_step[i] = cfunc.iget_mini_step( self , i )
self.__mpl_dates[i] = date2num( self.__dates[i] )
index_list = self.report_index_list()
length = len( index_list )
self.__datesR = [ 0 ] * length
self.__report_stepR = numpy.zeros( length , dtype = numpy.int32)
self.__mini_stepR = numpy.zeros( length , dtype = numpy.int32)
self.__daysR = numpy.zeros( length )
self.__mpl_datesR = numpy.zeros( length )
for i in range( length ):
time_index = index_list[ i ]
self.__daysR[i] = cfunc.iget_sim_days( self , time_index )
self.__datesR[i] = cfunc.iget_sim_time( self , time_index).datetime()
self.__report_stepR[i] = cfunc.iget_report_step( self , time_index )
self.__mini_stepR[i] = cfunc.iget_mini_step( self , time_index )
self.__mpl_datesR[i] = date2num( self.__datesR[i] )
def get_vector( self , key , report_only = False):
"""
Will return EclSumVector according to @key.
Will raise exception KeyError if the summary object does not
have @key.
"""
if self.has_key( key ):
return EclSumVector( self , key , report_only )
else:
raise KeyError("Summary object does not have key: %s" % key)
def report_index_list( self ):
"""
Internal function for working with report_steps.
"""
first_report = self.first_report
last_report = self.last_report
index_list = []
for report_step in range( first_report , last_report + 1):
time_index = cfunc.get_report_end( self , report_step )
index_list.append( time_index )
return index_list
def wells( self , pattern = None ):
"""
Will return a list of all the well names in case.
If the pattern variable is different from None only wells
matching the pattern will be returned; the matching is based
on fnmatch(), i.e. shell style wildcards.
"""
c_ptr = cfunc.create_well_list( self , pattern )
return StringList( c_ptr = c_ptr )
def groups( self , pattern = None ):
"""
Will return a list of all the group names in case.
If the pattern variable is different from None only groups
matching the pattern will be returned; the matching is based
on fnmatch(), i.e. shell style wildcards.
"""
c_ptr = cfunc.create_group_list( self , pattern )
return StringList( c_ptr = c_ptr )
def get_values( self , key , report_only = False):
"""
Will return numpy vector of all values according to @key.
If the optional argument report_only is true only the values
corresponding to report steps are included. The method is
also available as the 'values' property of an EclSumVector
instance.
"""
if self.has_key( key ):
key_index = cfunc.get_general_var_index( self , key )
if report_only:
index_list = self.report_index_list()
values = numpy.zeros( len(index_list) )
for i in range(len( index_list)):
time_index = index_list[i]
values[i] = cfunc.iiget( self , time_index , key_index )
else:
length = cfunc.data_length( self )
values = numpy.zeros( length )
for i in range( length ):
values[i] = cfunc.iiget( self , i , key_index )
return values
else:
raise KeyError("Summary object does not have key:%s" % key)
def get_key_index( self , key ):
"""
Lookup parameter index of @key.
All the summary keys identified in the SMSPEC file have a
corresponding index which is used internally. This function
will return that index for input key @key, this can then be
used in subsequent calls to e.g. the iiget() method. This is a
minor optimization in the case of many lookups of the same
key:
sum = ecl.EclSum( case )
key_index = sum.get_key_index( key )
for time_index in range( sum.length ):
value = sum.iiget( time_index , key_index )
Quite low-level function, should probably rather use a
EclSumVector based function?
"""
index = cfunc.get_general_var_index( self , key )
if index >= 0:
return index
else:
return None
def get_last_value( self , key ):
"""
Will return the last value corresponding to @key.
Typically useful to get the total production at end of
simulation:
total_production = sum.last_value("FOPT")
The alternative method 'last' will return a EclSumNode
instance with some extra time related information.
"""
return self[key].last_value
def get_last( self , key ):
"""
Will return the last EclSumNode corresponding to @key.
If you are only interested in the final value, you can use the
get_last_value() method.
"""
return self[key].last
def iiget(self , key_index , time_index):
"""
Lookup a summary value based on naive @time_index and
@key_index.
The iiget() method will lookup a summary value based on the
'time' value give by @time_index (i.e. naive counting of
time steps starting at zero), and a key index given by
@key_index. The @key_index value will typically be obtained
with the get_key_index() method first.
This is a quite low level function, in most cases it will be
natural to go via e.g. an EclSumVector instance.
"""
return cfunc.iiget( self , time_index , key_index )
def iget(self , key , time_index):
"""
Lookup summary value based on @time_index and key.
The @time_index value should be an integer [0,num_steps) and
@key should be string key. To get all the water cut values
from a well:
for time_index in range( sum.length ):
wwct = sum.iget( time_index , "WWCT:W5" )
This is a quite low level function, in most cases it will be
natural to go via e.g. an EclSumVector instance.
"""
return cfunc.get_general_var( self , time_index , key )
def __getitem__(self , key):
"""
Implements [] operator - @key should be a summary key.
The returned value will be a EclSumVector instance.
"""
return self.get_vector( key )
def check_sim_time( self , date):
"""
Will check if the input date is in the time span [sim_start , sim_end].
"""
return cfunc.check_sim_time( self , ctime(date) )
def get_interp( self , key , days = None , date = None):
"""
Will lookup vector @key at time given by @days or @date.
        Requires exactly one input argument @days or @date; will raise
exception ValueError if this is not satisfied.
The method will check that the time argument is within the
time limits of the simulation; if else the method will raise
exception ValueError.
Also available as method get_interp() on the EclSumVector
class.
"""
if days:
if date:
raise ValueError("Must supply either days or date")
else:
if cfunc.check_sim_days( self , days ):
return cfunc.get_general_var_from_sim_days( self , days , key )
else:
raise ValueError("days:%s is outside range of simulation: [%g,%g]" % (days , self.first_day , self.sim_length))
elif date:
if self.check_sim_time( date ):
return cfunc.get_general_var_from_sim_time( self , ctime(date) , key )
else:
raise ValueError("date:%s is outside range of simulation data" % date)
else:
raise ValueError("Must supply either days or date")
def get_report( self , date = None , days = None):
"""
Will return the report step corresponding to input @date or @days.
If the input argument does not correspond to any report steps
the function will return -1. Observe that the function
requires strict equality.
"""
if date:
if days:
raise ValueError("Must supply either days or date")
step = cfunc.get_report_step_from_time( self , ctime(date))
        elif days:
            step = cfunc.get_report_step_from_days( self , days)
        else:
            raise ValueError("Must supply either days or date")
        return step
def get_report_time( self , report):
"""
Will return the datetime corresponding to the report_step @report.
"""
ctime = cfunc.get_report_time( self , report )
return ctime.date()
def get_interp_vector( self , key , days_list = None , date_list = None):
"""
Will return numpy vector with interpolated values.
        Requires exactly one input argument @days_list or @date_list; will raise
exception ValueError if this is not satisfied.
The method will check that the time arguments are within the
time limits of the simulation; if else the method will raise
exception ValueError.
Also available as method get_interp_vector() on the
EclSumVector class.
"""
if days_list:
if date_list:
raise ValueError("Must supply either days_list or date_list")
else:
vector = numpy.zeros( len(days_list ))
sim_length = self.sim_length
sim_start = self.first_day
index = 0
for days in days_list:
if days >= sim_start and days < sim_length:
vector[index] = cfunc.get_general_var_from_sim_days( self , days , key)
else:
raise ValueError("Invalid days value")
index += 1
elif date_list:
start_time = self.data_start
end_time = self.end_date
vector = numpy.zeros( len(date_list ))
index = 0
for date in date_list:
if date >= start_time and date <= end_time:
vector[index] = cfunc.get_general_var_from_sim_time( self , ctime(date) , key)
else:
raise ValueError("Invalid date value")
index += 1
else:
raise ValueError("Must supply either days_list or date_list")
return vector
def get_from_report( self , key , report_step ):
"""
Return summary value of @key at time @report_step.
"""
time_index = cfunc.get_report_end( self , report_step )
return cfunc.get_general_var( self , time_index , key )
def has_key( self , key):
"""
Check if summary object has key @key.
"""
return cfunc.has_key( self, key )
def smspec_node( self , key ):
"""
Will return a EclSMSPECNode instance corresponding to @key.
The returned EclSMPECNode instance can then be used to ask for
various properties of the variable; i.e. if it is a rate
variable, what is the unit, if it is a total variable and so
on.
"""
if self.has_key( key ):
c_ptr = cfunc.get_var_node( self , key )
return EclSMSPECNode( c_ptr , self )
else:
raise KeyError("Summary case does not have key:%s" % key)
def unit(self , key):
"""
Will return the unit of @key.
"""
node = self.smspec_node( key )
return node.unit
@property
def case(self):
"""
Will return the case name of the current instance - optionally including path.
"""
return cfunc.get_simcase( self )
@property
def path(self):
"""
Will return the path to the current case. Will be None for
case in CWD. See also abs_path.
"""
return cfunc.get_path( self )
@property
def base(self):
"""
Will return the basename of the current case - no path.
"""
return cfunc.get_base( self )
@property
def abs_path(self):
"""
Will return the absolute path to the current case.
"""
return cfunc.get_abs_path( self )
#-----------------------------------------------------------------
# Here comes functions for getting vectors of the time
# dimension. All the get_xxx() functions have an optional boolean
# argument @report_only. If this argument is set to True the
# functions will return time vectors only corresponding to the
# report times.
#
# In addition to the get_xxx() methods there are properties with
# the same name (excluding the 'get'); these properties correspond
    # to a get_xxx() invocation with optional argument report_only
    # set to False (i.e. the default).
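    #
    # Illustrative pairing (added comment, not part of the original file):
    #
    #    case.days                           <=>  case.get_days( False )
    #    case.get_days( report_only = True )  ->  only the report step times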
@property
def days( self ):
"""
        Will return a numpy vector of simulation days.
"""
return self.get_days( False )
def get_days( self , report_only = False):
"""
        Will return a numpy vector of simulation days.
If the optional argument @report_only is set to True, only
'days' values corresponding to report steps will be included.
"""
if report_only:
return self.__daysR
else:
return self.__days
@property
def dates( self ):
"""
Will return a list of simulation dates.
The list will be an ordinary Python list, and the dates will
be in terms ordinary Python datetime values.
"""
return self.get_dates( False )
def get_dates( self , report_only = False):
"""
Will return a list of simulation dates.
The list will be an ordinary Python list, and the dates will
be in terms ordinary Python datetime values. If the optional
argument @report_only is set to True, only dates corresponding
to report steps will be included.
"""
if report_only:
return self.__datesR
else:
return self.__dates
@property
def mpl_dates( self ):
"""
Will return a numpy vector of dates ready for matplotlib
The content of the vector are dates in matplotlib format,
i.e. floats - generated by the date2num() function at the top
of this file.
"""
return self.get_mpl_dates( False )
def get_mpl_dates( self , report_only = False):
"""
Will return a numpy vector of dates ready for matplotlib
If the optional argument @report_only is set to True, only
dates values corresponding to report steps will be
included. The content of the vector are dates in matplotlib
format, i.e. floats - generated by the date2num() function at
the top of this file.
"""
if report_only:
return self.__mpl_datesR
else:
return self.__mpl_dates
@property
def mini_step( self ):
"""
        Will return a Python list of ministep values.
Will return a Python list of ministep values from this summary
case; the ministep values are the internal indexing of
timesteps provided by the reservoir simulator. In normal cases
this will be: [0,1,2,3,4,5,....], but in the case of restarted
simulations it can start at a higher value, and there can also
        be 'holes' in the series if 'RPTONLY' has been used in the
        ECLIPSE datafile.
"""
return self.get_mini_step( False )
def get_mini_step( self , report_only = False):
"""
        Will return a Python list of ministep values.
If the optional argument @report_only is set to True, only
dates values corresponding to report steps will be
included. See documentation of property: 'mini_step' for
further documentation.
"""
if report_only:
return self.__mini_stepR
else:
return self.__mini_step
@property
def report_step( self ):
"""
Will return a list of report steps.
The simulator will typically use several simulation timesteps
for each report step, and the number will change between
different report steps. So - assuming that the first report
step one has five simulations timesteps and the next two have
three the report_step vector can look like:
[...,1,1,1,1,1,2,2,2,3,3,3,....]
"""
return self.get_report_step( False )
def get_report_step( self , report_only = False):
if report_only:
return self.__report_stepR
else:
return self.__report_step
#-----------------------------------------------------------------
def iget_days(self , time_index):
"""
Returns the number of simulation days for element nr @time_index.
"""
return cfunc.iget_sim_days( self , time_index )
def iget_date(self , time_index):
"""
Returns the simulation date for element nr @time_index.
"""
return cfunc.iget_sim_time( self , time_index ).datetime()
def iget_report( self , time_index ):
"""
Returns the report step corresponding to @time_index.
One report step will in general contain many ministeps.
"""
return cfunc.iget_report_step( self , time_index )
@property
def length(self):
"""
The number of timesteps in the dataset.
"""
return cfunc.data_length( self )
@property
def first_day(self):
"""
The first day we have simulation data for; normally 0.
"""
return cfunc.get_first_day( self )
@property
def sim_length( self ):
"""
The length of the current dataset in simulation days.
Will include the length of a leading restart section,
irrespective of whether we have data for this or not.
"""
return cfunc.sim_length( self )
@property
def start_date(self):
"""
A Python date instance with the start date.
The start time is taken from the SMSPEC file, and in case not
all timesteps have been loaded, e.g. for a restarted case, the
returned start_date might be different from the datetime of
the first (loaded) timestep.
"""
ctime = cfunc.get_start_date( self )
return ctime.date()
@property
def end_date(self):
"""
The date of the last (loaded) time step.
"""
ctime = cfunc.get_end_date( self )
return ctime.date()
@property
def data_start(self):
"""
The first date we have data for.
"""
ctime = cfunc.get_data_start( self )
return ctime.date()
@property
def start_time(self):
"""
A Python datetime instance with the start time.
See start_date() for further details.
"""
ctime = cfunc.get_start_date( self )
return ctime.datetime()
@property
def end_time(self):
"""
The time of the last (loaded) time step.
"""
ctime = cfunc.get_end_date( self )
return ctime.datetime()
@property
def last_report(self):
"""
The number of the last report step in the dataset.
"""
return cfunc.get_last_report_step( self )
@property
def first_report(self):
"""
        The number of the first report step in the dataset.
"""
return cfunc.get_first_report_step( self )
def first_gt_index( self , key , limit ):
"""
Returns the first index where @key is above @limit.
"""
key_index = cfunc.get_general_var_index( self , key )
        time_index = cfunc.get_first_gt( self , key_index , limit )
return time_index
def first_lt_index( self , key , limit ):
"""
Returns the first index where @key is below @limit.
"""
key_index = cfunc.get_general_var_index( self , key )
time_index = cfunc.get_first_lt( self , key_index , limit )
return time_index
def first_gt( self , key , limit ):
"""
First EclSumNode of @key which is above @limit.
"""
vector = self[key]
return vector.first_gt( limit )
def first_lt(self , key , limit ):
"""
First EclSumNode of @key which is below @limit.
"""
vector = self[key]
return vector.first_lt( limit )
def keys( self , pattern = None):
"""
Return a list of summary keys matching @pattern.
The matching algorithm is ultimately based on the fnmatch()
function, i.e. normal shell-character syntax is used. With
@pattern == "WWCT:*" you will get a list of watercut keys for
all wells.
If pattern is None you will get all the keys of summary
object.
"""
s = StringList()
cfunc.select_matching_keys( self , pattern , s )
return s.strings
def fwrite(self , ecl_case = None):
if ecl_case:
cfunc.set_case( self , ecl_case )
cfunc.fwrite_sum( self )
#################################################################
# 2. Creating a wrapper object around the libecl library,
# registering the type map : ecl_kw <-> EclKW
cwrapper = CWrapper( libecl.lib )
cwrapper.registerType( "ecl_sum" , EclSum )
cwrapper.registerType( "smspec_node" , EclSMSPECNode )
# 3. Installing the c-functions used to manipulate ecl_kw instances.
# These functions are used when implementing the EclKW class, not
# used outside this scope.
cfunc = CWrapperNameSpace("ecl_sum")
cfunc.create_well_list = cwrapper.prototype("c_void_p ecl_sum_alloc_well_list( ecl_sum , char* )")
cfunc.create_group_list = cwrapper.prototype("c_void_p ecl_sum_alloc_group_list( ecl_sum , char* )")
cfunc.fread_alloc = cwrapper.prototype("c_void_p ecl_sum_fread_alloc_case__( char* , char* , bool)")
cfunc.iiget = cwrapper.prototype("double ecl_sum_iget( ecl_sum , int , int)")
cfunc.free = cwrapper.prototype("void ecl_sum_free( ecl_sum )")
cfunc.data_length = cwrapper.prototype("int ecl_sum_get_data_length( ecl_sum )")
cfunc.iget_sim_days = cwrapper.prototype("double ecl_sum_iget_sim_days( ecl_sum , int) ")
cfunc.iget_report_step = cwrapper.prototype("int ecl_sum_iget_report_step( ecl_sum , int) ")
cfunc.iget_mini_step = cwrapper.prototype("int ecl_sum_iget_mini_step( ecl_sum , int) ")
cfunc.iget_sim_time = cwrapper.prototype("time_t ecl_sum_iget_sim_time( ecl_sum , int) ")
cfunc.get_report_end = cwrapper.prototype("int ecl_sum_iget_report_end( ecl_sum , int)")
cfunc.get_general_var = cwrapper.prototype("double ecl_sum_get_general_var( ecl_sum , int , char*)")
cfunc.get_general_var_index = cwrapper.prototype("int ecl_sum_get_general_var_params_index( ecl_sum , char*)")
cfunc.get_general_var_from_sim_days = cwrapper.prototype("double ecl_sum_get_general_var_from_sim_days( ecl_sum , double , char*)")
cfunc.get_general_var_from_sim_time = cwrapper.prototype("double ecl_sum_get_general_var_from_sim_time( ecl_sum , time_t , char*)")
cfunc.get_first_gt = cwrapper.prototype("int ecl_sum_get_first_gt( ecl_sum , int , double )")
cfunc.get_first_lt = cwrapper.prototype("int ecl_sum_get_first_lt( ecl_sum , int , double )")
cfunc.get_start_date = cwrapper.prototype("time_t ecl_sum_get_start_time( ecl_sum )")
cfunc.get_end_date = cwrapper.prototype("time_t ecl_sum_get_end_time( ecl_sum )")
cfunc.get_last_report_step = cwrapper.prototype("int ecl_sum_get_last_report_step( ecl_sum )")
cfunc.get_first_report_step = cwrapper.prototype("int ecl_sum_get_first_report_step( ecl_sum )")
cfunc.iget_report_step = cwrapper.prototype("int ecl_sum_iget_report_step( ecl_sum , int )")
cfunc.select_matching_keys = cwrapper.prototype("void ecl_sum_select_matching_general_var_list( ecl_sum , char* , stringlist )")
cfunc.has_key = cwrapper.prototype("bool ecl_sum_has_general_var( ecl_sum , char* )")
cfunc.check_sim_time = cwrapper.prototype("bool ecl_sum_check_sim_time( ecl_sum , time_t )")
cfunc.check_sim_days = cwrapper.prototype("bool ecl_sum_check_sim_days( ecl_sum , double )")
cfunc.sim_length = cwrapper.prototype("double ecl_sum_get_sim_length( ecl_sum )")
cfunc.get_first_day = cwrapper.prototype("double ecl_sum_get_first_day( ecl_sum )")
cfunc.get_data_start = cwrapper.prototype("time_t ecl_sum_get_data_start( ecl_sum )")
cfunc.get_unit = cwrapper.prototype("char* ecl_sum_get_unit( ecl_sum , char*)")
cfunc.get_simcase = cwrapper.prototype("char* ecl_sum_get_case( ecl_sum )")
cfunc.get_base = cwrapper.prototype("char* ecl_sum_get_base( ecl_sum )")
cfunc.get_path = cwrapper.prototype("char* ecl_sum_get_path( ecl_sum )")
cfunc.get_abs_path = cwrapper.prototype("char* ecl_sum_get_abs_path( ecl_sum )")
cfunc.get_report_step_from_time = cwrapper.prototype("int ecl_sum_get_report_step_from_time( ecl_sum , time_t)")
cfunc.get_report_step_from_days = cwrapper.prototype("int ecl_sum_get_report_step_from_days( ecl_sum , double)")
cfunc.get_report_time = cwrapper.prototype("time_t ecl_sum_get_report_time(ecl_sum , int)")
cfunc.fwrite_sum = cwrapper.prototype("void ecl_sum_fwrite(ecl_sum)")
cfunc.set_case = cwrapper.prototype("void ecl_sum_set_case(ecl_sum, char*)")
#-----------------------------------------------------------------
# smspec node related stuff
cfunc.get_var_node = cwrapper.prototype("c_void_p ecl_sum_get_general_var_node(ecl_sum , char* )")
cfunc.node_is_total = cwrapper.prototype("bool smspec_node_is_total( smspec_node )")
cfunc.node_is_historical = cwrapper.prototype("bool smspec_node_is_historical( smspec_node )")
cfunc.node_is_rate = cwrapper.prototype("bool smspec_node_is_rate( smspec_node )")
cfunc.node_unit = cwrapper.prototype("char* smspec_node_get_unit( smspec_node )")
cfunc.node_wgname = cwrapper.prototype("char* smspec_node_get_wgname( smspec_node )")
cfunc.node_keyword = cwrapper.prototype("char* smspec_node_get_keyword( smspec_node )")
cfunc.node_num = cwrapper.prototype("int smspec_node_get_num( smspec_node )")
cfunc.node_need_num = cwrapper.prototype("bool smspec_node_need_nums( smspec_node )")
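#-----------------------------------------------------------------
# Illustrative end-to-end usage (added as a commented sketch, not part of the
# original module; "ECLIPSE_CASE" is a placeholder case name and the import
# line assumes this file is importable as ecl_sum):
#
#    import ecl_sum
#
#    sum = ecl_sum.EclSum( "ECLIPSE_CASE" )
#    if sum:
#        wwct = sum[ "WWCT:OP_1" ]           # EclSumVector via __getitem__
#        print "Simulation end : %s" % sum.end_date
#        print "Final water cut: %g" % wwct.last_value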
|
gpl-3.0
|
sheshant/cuda-convnet2
|
shownet.py
|
180
|
18206
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
from StringIO import StringIO
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
|
apache-2.0
|
3324fr/spinalcordtoolbox
|
scripts/sct_compute_ernst_angle.py
|
1
|
5126
|
#!/usr/bin/env python
#########################################################################################
#
# Compute the Ernst angle from T1 and TR.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Sara Dupont
# Modified: 2015-02-17
#
# About the license: see the file LICENSE.TXT
#########################################################################################
from msct_parser import Parser
import sys
import sct_utils as sct
#import numpy as np
# DEFAULT PARAMETERS
class Param:
## The constructor
def __init__(self):
self.debug = 0
self.verbose = 1
self.t1=0
class ErnstAngle:
## The constructor
def __init__(self, t1,tr=None, fname_output=None):
self.t1=t1
self.tr=tr
self.fname_output = fname_output
#compute and return the Ernst Angle
def getErnstAngle(self,tr):
from numpy import arccos
from numpy import exp
from math import pi
angle_rad=arccos(exp(-tr/self.t1))
angle_deg=angle_rad*180/pi
return angle_deg
#draw the graph
def draw(self,tr_min,tr_max):
import matplotlib.pyplot as plt
from numpy import arange
step=(tr_max-tr_min)/50
tr_range=arange(tr_min,tr_max,step)
theta_E=self.getErnstAngle(tr_range)
sct.printv("\nDrawing",type='info')
plt.plot(tr_range,theta_E,linewidth=1.0)
plt.xlabel("TR (in $ms$)")
plt.ylabel("$\Theta_E$ (in degree)")
plt.ylim(min(theta_E),max(theta_E)+2)
plt.title("Ernst Angle with T1="+str(self.t1)+"ms")
plt.grid(True)
if self.tr is not None:
plt.plot(self.tr,self.getErnstAngle(self.tr),'ro')
if self.fname_output is not None :
sct.printv("\nSaving figure",type='info')
plt.savefig(self.fname_output,format='png')
plt.show()
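# Illustrative sketch (hypothetical helper, not part of the original script):
# a quick numerical check of getErnstAngle(). For white matter at 3T with
# T1 = 850 ms and TR = 2000 ms the formula above gives
#   theta_E = arccos(exp(-2000.0/850.0)) = arccos(0.095) ~= 1.476 rad ~= 84.5 deg
def _example_ernst_angle():
    """Illustrative only: returns ~84.5 (degrees) for T1=850 ms, TR=2000 ms."""
    return ErnstAngle(850.0).getErnstAngle(2000.0)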
def get_parser():
# Initialize the parser
parser = Parser(__file__)
parser.usage.set_description('Function to get the Ernst Angle. For examples of T1 values, see Stikov et al. MRM 2015. Example in the white matter at 3T: 850ms.')
parser.add_option(name="-t1",
type_value="float",
description="T1 value (in ms).",
mandatory=True,
example='800')
parser.add_option(name="-b",
type_value=[[','],'float'],
description="Boundaries for the TR parameter (in ms).",
mandatory=False,
example='500,3500')
parser.add_option(name="-tr",
type_value='float',
description="Value of TR (in ms) to get the Ernst Angle. ",
mandatory=False,
example='2000')
parser.add_option(name="-d",
type_value=None,
description="Display option. The graph isn't display if 0. ",
deprecated_by="-v",
mandatory=False)
parser.add_option(name="-o",
type_value="file_output",
description="Name of the output graph of the Ernst angle.",
mandatory=False,
example="ernst_angle.png")
parser.add_option(name="-v",
type_value='multiple_choice',
description="verbose: 0 = nothing, 1 = classic, 2 = expended (graph)",
mandatory=False,
example=['0', '1', '2'],
default_value='1')
return parser
#=======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
# initialize parameters
param = Param()
param_default = Param()
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
input_t1 = arguments["-t1"]
input_fname_output = None
input_tr_min=500
input_tr_max=3500
input_tr=None
verbose=1
if "-o" in arguments:
input_fname_output = arguments["-o"]
if "-b" in arguments:
input_tr_min = arguments["-b"][0]
input_tr_max = arguments["-b"][1]
if "-tr" in arguments :
input_tr=arguments["-tr"]
if "-v" in arguments :
verbose=int(arguments["-v"])
graph = ErnstAngle(input_t1, tr=input_tr, fname_output=input_fname_output)
if input_tr is not None:
sct.printv("\nValue of the Ernst Angle with T1="+str(graph.t1)+"ms and TR="+str(input_tr)+"ms :",verbose=verbose,type='info')
sct.printv(str(graph.getErnstAngle(input_tr)))
if input_tr>input_tr_max:
input_tr_max=input_tr+500
elif input_tr<input_tr_min:
input_tr_min=input_tr-500
if verbose == 2 :
graph.draw(input_tr_min, input_tr_max)
|
mit
|
dsavoiu/kafe2
|
case_studies/determinant_cost.py
|
1
|
8614
|
"""
kafe2 Case Study: Determinant Cost
==================================
So far we've seen two cases where kafe2 uses dynamic errors: when adding x errors or when adding
errors relative to the model.
In such cases the errors are a function of the parameter values.
However, this introduces a bias towards parameter values that result in large errors because this
reduces the overall cost.
More specifically, this results in a bias towards parameter values with increased absolute
derivatives for x errors or with increased absolute model function values for relative model errors.
The aforementioned bias can be counteracted by adding an extra term to the cost function: the
logarithm of the determinant of the covariance matrix.
In this example we will investigate the effect of (not) adding the determinant cost to a chi2 cost
function when handling data with xy errors with kafe2.
To get a better understanding of how kafe2 works internally we will also do a manual implementation
of a fit with SciPy.
Finally, we will compare these results with SciPy orthogonal distance regression (ODR), another tool
that can fit a model function to data with xy errors.
"""
import numpy as np
from scipy.optimize import minimize
from scipy.odr import RealData, Model, ODR
from kafe2 import XYFit, XYCostFunction_Chi2
from numdifftools import Hessian
import matplotlib.pyplot as plt
# Seed the NumPy RNG to ensure consistent results:
np.random.seed(1)
# The x error is much larger than the y error.
# This results in a stronger bias compared to a large y error and a small x error.
# The bias disappears for X_ERROR -> 0.
X_ERROR = 1.0
Y_ERROR = 0.2
# The fit parameter values we use to generate the toy data:
TRUE_PARAMETER_VALUES = np.array([1.0, 0.1, -1.0])
PARAMETER_NAMES = ["a", "b", "c"]
# Our model function is an exponential model with three parameters:
def model_function(x, a, b, c):
return a * np.exp(b * x) + c
# The derivative of our model function.
# Note that the parameters have different effects on the derivative.
# c has no effect at all.
# An increase in either a or b leads to an increase in the derivative;
# the effect of b is greater for x > 0.
def model_function_derivative(x, a, b, c):
return a * b * np.exp(x * b)
# The x data assumed by the experimenter:
x_data = np.linspace(start=-10, stop=10, num=61)
# The actual x data when factoring in x errors:
true_x_data = x_data + np.random.normal(size=x_data.shape, scale=X_ERROR)
# The y data based on the unknown true x values:
y_data = model_function(true_x_data, *TRUE_PARAMETER_VALUES)\
+ np.random.normal(size=x_data.shape, scale=Y_ERROR)
# Utility function to do a fit with kafe2:
def kafe2_fit(add_determinant_cost):
fit = XYFit(
xy_data=[x_data, y_data],
model_function=model_function,
# Create a kafe2 cost function object to turn off the determinant cost:
cost_function=XYCostFunction_Chi2(add_determinant_cost=add_determinant_cost),
)
fit.add_error(axis="x", err_val=X_ERROR)
fit.add_error(axis="y", err_val=Y_ERROR)
# Set the parameter values to the true values because we're only interested in the bias:
fit.set_all_parameter_values(TRUE_PARAMETER_VALUES)
fit.do_fit()
return fit.parameter_values, fit.parameter_errors
kafe2_values_det, kafe2_errors_det = kafe2_fit(add_determinant_cost=True)
kafe2_values_no_det, kafe2_errors_no_det = kafe2_fit(add_determinant_cost=False)
# This is our chi2 cost function.
def chi2(args, add_determinant_cost):
a, b, c = args # Unpack args from format expected by scipy.optimize.
y_model = model_function(x_data, a, b, c)
# Calculate the projected y error by extrapolating the x error based on the derivatives.
# Note how a large absolute derivative results in a large projected y error.
projected_y_error = np.sqrt(
Y_ERROR ** 2
+ (model_function_derivative(x_data, a, b, c) * X_ERROR) ** 2
)
# Now just calculate chi2 as per usual:
normed_residuals = (y_data - y_model) / projected_y_error
cost = np.sum(normed_residuals ** 2)
# Note how large values for projected_y_error result in a lower overall cost.
if add_determinant_cost:
# Add extra cost based on the determinant of the covariance matrix.
# We are using uncorrelated errors in which case the covariance matrix is diagonal.
# The determinant can therefore be calculated as np.prod(projected_y_error ** 2) .
# But because this can result in overflow we instead calculate the extra cost like this:
cost += 2.0 * np.sum(np.log(projected_y_error))
# The above line is equivalent to:
# cost += np.log(np.prod(projected_y_error ** 2))
#
# Note how large values for projected_y_error result in a higher overall cost.
return cost
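# Quick sanity check (illustrative addition, assuming nothing beyond NumPy):
# for a diagonal covariance matrix V = diag(sigma_i ** 2) the determinant cost
# used above, 2 * sum(log(sigma_i)), equals log(det(V)).
_sigma_check = np.array([0.5, 1.0, 2.0])
assert np.isclose(2.0 * np.sum(np.log(_sigma_check)),
                  np.log(np.linalg.det(np.diag(_sigma_check ** 2))))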
# Utility function to do a manual fit with scipy.optimize.minimize:
def scipy_fit(add_determinant_cost):
# Wrapped function to hand over to minimize:
def cost_function(args):
return chi2(args, add_determinant_cost)
optimize_result = minimize(
fun=cost_function,
# Initialize fit with true values because we're only interested in the bias:
x0=TRUE_PARAMETER_VALUES,
)
parameter_values = optimize_result.x
# Calculate parameter errors from Hessian matrix (2nd order derivatives) at minimum:
hessian_matrix = Hessian(cost_function)(parameter_values)
# Not to be confused with the covariance matrix of our data:
parameter_covariance_matrix = 2.0 * np.linalg.inv(hessian_matrix)
parameter_errors = np.sqrt(np.diag(parameter_covariance_matrix))
return parameter_values, parameter_errors
scipy_values_det, scipy_errors_det = scipy_fit(add_determinant_cost=True)
scipy_values_no_det, scipy_errors_no_det = scipy_fit(add_determinant_cost=False)
# Do a fit with SciPy ODR for comparison:
odr_data = RealData(x_data, y_data, X_ERROR, Y_ERROR)
odr_model = Model(lambda parameter_values, x: model_function(x, *parameter_values))
odr_fit = ODR(odr_data, odr_model, beta0=TRUE_PARAMETER_VALUES)
odr_result = odr_fit.run()
odr_values = odr_result.beta
odr_errors = odr_result.sd_beta
# Utility function to print out results:
def print_results(name, parameter_values, parameter_errors):
print("======== {name} ========".format(name=name))
for pn, pv, pe, epv in zip(
PARAMETER_NAMES, parameter_values, parameter_errors, TRUE_PARAMETER_VALUES):
sigma = abs(pv - epv) / pe
print("{pn} = {pv:.4f} +- {pe:.4f} (off by {sigma:.2f} sigma)".format(
pn=pn, pv=pv, pe=pe, sigma=sigma))
print()
print_results("kafe2 with det cost", kafe2_values_det, kafe2_errors_det)
print_results("kafe2 without det cost", kafe2_values_no_det, kafe2_errors_no_det)
print_results("scipy minimize with det cost", scipy_values_det, scipy_errors_det)
print_results("scipy minimize without det cost", scipy_values_no_det, scipy_errors_no_det)
# Unsurprisingly kafe2 and our manual re-implementation of kafe2 yield almost the exact same result.
# Note how fits without determinant cost have higher values for b which has the biggest influence
# on the model function derivative.
# Because our fit parameters are correlated this bias also influences the other parameters,
# even if they have no influence on the model function derivative as is the case with c.
#
# With the default seed our fit result becomes worse through the aforementioned bias.
# However, if the seed is changed or removed the biased fit result can sometimes be better.
# This is because our data can just happen to have errors that result in a fit with parameter values
# that underestimate the model function derivative.
# On average the unbiased fit result will be better.
print_results("scipy ODR", odr_values, odr_errors)
# SciPy ODR is comparable to kafe2 without determinant cost.
# Finally, let's do a simple plot for our results.
# Note how the fit result without determinant cost results in a steeper model function.
x_plot = np.linspace(start=-12, stop=12, num=121)
plt.errorbar(
x_data, y_data, xerr=X_ERROR, yerr=Y_ERROR,
color="tab:blue", marker=".", ls="", label="Data"
)
plt.plot(
x_plot, model_function(x_plot, *kafe2_values_det),
color="black", label="kafe2 with det cost"
)
plt.plot(
x_plot, model_function(x_plot, *kafe2_values_no_det),
color="yellow", ls="--", label="kafe2 without det cost"
)
plt.plot(
x_plot, model_function(x_plot, *odr_values),
color="red", ls=":", label="scipy odr"
)
plt.legend()
plt.xlim(-12, 12)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
|
gpl-3.0
|
maxalbert/bokeh
|
bokeh/compat/mplexporter/tools.py
|
75
|
1732
|
"""
Tools for matplotlib plot exporting
"""
def ipynb_vega_init():
"""Initialize the IPython notebook display elements
This function borrows heavily from the excellent vincent package:
http://github.com/wrobstory/vincent
"""
try:
from IPython.core.display import display, HTML
except ImportError:
print('IPython Notebook could not be loaded.')
require_js = '''
if (window['d3'] === undefined) {{
require.config({{ paths: {{d3: "http://d3js.org/d3.v3.min"}} }});
require(["d3"], function(d3) {{
window.d3 = d3;
{0}
}});
}};
if (window['topojson'] === undefined) {{
require.config(
{{ paths: {{topojson: "http://d3js.org/topojson.v1.min"}} }}
);
require(["topojson"], function(topojson) {{
window.topojson = topojson;
}});
}};
'''
d3_geo_projection_js_url = "http://d3js.org/d3.geo.projection.v0.min.js"
d3_layout_cloud_js_url = ("http://wrobstory.github.io/d3-cloud/"
"d3.layout.cloud.js")
topojson_js_url = "http://d3js.org/topojson.v1.min.js"
vega_js_url = 'http://trifacta.github.com/vega/vega.js'
dep_libs = '''$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$([IPython.events]).trigger("vega_loaded.vincent");
})
})
})
});''' % (d3_geo_projection_js_url, d3_layout_cloud_js_url,
topojson_js_url, vega_js_url)
load_js = require_js.format(dep_libs)
html = '<script>'+load_js+'</script>'
display(HTML(html))
|
bsd-3-clause
|
BonexGu/Blik2D-SDK
|
Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
|
88
|
31139
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports iterables.
n_classes: number of classes. Must be None or same type as y. In case `y`
is `dict` (or an iterable which returns dicts), `n_classes[key]` gives the
number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
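# Illustrative sketch (hypothetical helper, not part of this module): with
# in-memory numpy arrays setup_train_data_feeder dispatches to DataFeeder,
# while iterator inputs would yield a StreamingDataFeeder instead.
def _example_setup_train_data_feeder():
  """Illustrative only: returns the class name of the selected feeder."""
  x = np.random.rand(16, 2).astype(np.float32)
  y = np.random.randint(0, 2, size=16)
  feeder = setup_train_data_feeder(x, y, n_classes=2, batch_size=8)
  return type(feeder).__name__  # 'DataFeeder' for array inputs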
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
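# Illustrative sketch (hypothetical helper, not part of this module): batching
# behaviour of setup_predict_data_feeder for a plain numpy array.
def _example_setup_predict_data_feeder():
  """Illustrative only: 10 rows with batch_size=4 -> parts of 4, 4 and 2."""
  x = np.arange(10, dtype=np.float64).reshape(-1, 1)
  parts = setup_predict_data_feeder(x, batch_size=4)
  return [p.shape for p in parts]  # [(4, 1), (4, 1), (2, 1)]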
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `data` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample which can be either an Nd numpy matrix of shape
`[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
input_dtype: DType of input (or dictionary of dtypes).
output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if x_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
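# Illustrative sketch (hypothetical helper, not part of this module): typical
# DataFeeder wiring -- build placeholders, then call the feed_dict function to
# sample mini-batches (requires a default TF graph, as in TF 1.x).
def _example_data_feeder_usage():
  """Illustrative only: returns one feed_dict sampled from a toy dataset."""
  x = np.random.rand(8, 3).astype(np.float32)
  y = np.array([0, 1, 0, 1, 0, 1, 0, 1])
  feeder = DataFeeder(x, y, n_classes=2, batch_size=4)
  feeder.input_builder()              # creates input/output placeholders
  feed_dict_fn = feeder.get_feed_dict_fn()
  return feed_dict_fn()               # maps placeholder names to numpy arrays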
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
Streaming data feeder allows reading data as it comes in from disk or
somewhere else. It is customary to have these iterators rotate infinitely over
the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator, each element of which returns one feature sample. The sample
can be an Nd numpy matrix or a dictionary of Nd numpy matrices.
y: iterator, each element of which returns one label sample. The sample can
be an Nd numpy matrix or a dictionary of Nd numpy matrices with 1 or many
classes / regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set to
`None`, the iterator is assumed to return already batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
x: iterator that, for each element, returns features.
y: iterator that, for each element, returns 1 or many classes /
regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate, so use an
int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
|
mit
|
michigraber/scikit-learn
|
sklearn/ensemble/gradient_boosting.py
|
126
|
65552
|
"""Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
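# Worked example (illustrative comment, not part of the original module): with
# 75 positive and 25 negative labels and no sample weights, LogOddsEstimator
# fits prior = 1.0 * log(75 / 25) = log(3) ~= 1.099, and predict() fills one
# column with that constant for every row of X.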
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,)
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
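# Illustrative sketch (not part of scikit-learn): ``LeastAbsoluteError`` in
# isolation. Hypothetical demo helper, never called by the library; it shows
# that the negative gradient of the absolute loss is just the sign of the
# residual, encoded as +1/-1.
def _demo_least_absolute_error():
    import numpy as np
    y = np.array([1.0, 2.0, 3.0])
    pred = np.array([[1.5], [2.0], [2.5]])
    loss = LeastAbsoluteError(n_classes=1)
    value = loss(y, pred)                   # mean absolute error, here ~0.333
    sign = loss.negative_gradient(y, pred)  # array([-1., -1., 1.])
    return value, sign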
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
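# Illustrative sketch (not part of scikit-learn): ``HuberLossFunction`` blends
# the squared and absolute losses around a data-driven cutoff ``gamma`` (the
# ``alpha`` quantile of the absolute residuals). Hypothetical demo helper,
# never called by the library.
def _demo_huber_loss():
    import numpy as np
    y = np.array([0.0, 0.0, 0.0, 0.0])
    pred = np.array([[0.1], [-0.2], [0.3], [5.0]])  # last residual is an outlier
    loss = HuberLossFunction(n_classes=1, alpha=0.9)
    value = loss(y, pred)  # small residuals enter quadratically, the outlier linearly
    residual = loss.negative_gradient(y, pred)  # outlier clipped to +/- gamma
    return value, residual, loss.gamma  # gamma is stored by negative_gradient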
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
    Quantile regression allows estimating the percentiles
    of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
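# Illustrative sketch (not part of scikit-learn): ``QuantileLossFunction`` is
# the pinball loss; under- and over-predictions are weighted by ``alpha`` and
# ``1 - alpha`` respectively. Hypothetical demo helper, never called by the
# library.
def _demo_quantile_loss():
    import numpy as np
    y = np.array([1.0, 2.0, 3.0])
    pred = np.array([[1.5], [2.0], [2.5]])
    loss = QuantileLossFunction(n_classes=1, alpha=0.9)
    value = loss(y, pred)                   # pinball loss at the 90% quantile
    grad = loss.negative_gradient(y, pred)  # alpha where y > pred, -(1 - alpha) elsewhere
    return value, grad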
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        Losses that do not support probabilities keep this default
        implementation, which raises a TypeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
        Our node estimate is given by:
            sum(w * (y - prob)) / sum(w * prob * (1 - prob))
        We take advantage of the fact that y - prob = residual.
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
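# Illustrative sketch (not part of scikit-learn): ``BinomialDeviance`` works on
# raw scores (log-odds). Hypothetical demo helper, never called by the library;
# it assumes ``expit`` is available from the module-level imports used above.
def _demo_binomial_deviance():
    import numpy as np
    y = np.array([0.0, 0.0, 1.0, 1.0])
    raw_score = np.array([[-2.0], [-0.5], [0.5], [2.0]])
    loss = BinomialDeviance(n_classes=2)          # binary classification only
    value = loss(y, raw_score)                    # 2 * negative log-likelihood
    grad = loss.negative_gradient(y, raw_score)   # y - sigmoid(score)
    proba = loss._score_to_proba(raw_score)       # column 1 is P(y=1)
    return value, grad, proba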
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
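# Illustrative sketch (not part of scikit-learn): for K classes,
# ``MultinomialDeviance`` expects a score matrix of shape (n_samples, K) and,
# as in ``_fit_stage``, the per-class negative gradient is computed against a
# 0/1 indicator of class ``k``. Hypothetical demo helper, never called by the
# library.
def _demo_multinomial_deviance():
    import numpy as np
    y = np.array([0, 1, 2, 1])
    raw_score = np.zeros((4, 3))            # uninformative scores: P = 1/3 per class
    loss = MultinomialDeviance(n_classes=3)
    value = loss(y, raw_score)
    k = 0
    y_k = np.array(y == k, dtype=np.float64)              # indicator used per class
    grad_k = loss.negative_gradient(y_k, raw_score, k=k)  # y_k - softmax(score)[:, k]
    proba = loss._score_to_proba(raw_score)               # rows sum to 1
    return value, grad_k, proba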
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
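# Illustrative sketch (not part of scikit-learn): ``ExponentialLoss`` uses the
# AdaBoost criterion exp(-(2*y - 1) * score) on 0/1 labels. Hypothetical demo
# helper, never called by the library.
def _demo_exponential_loss():
    import numpy as np
    y = np.array([0.0, 1.0, 1.0])
    raw_score = np.array([[-1.0], [0.5], [2.0]])
    loss = ExponentialLoss(n_classes=2)
    value = loss(y, raw_score)
    grad = loss.negative_gradient(y, raw_score)
    decision = loss._score_to_decision(raw_score)   # class 1 where score >= 0
    return value, grad, decision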
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
    If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
    each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, call `fit`"
                                 " before making predictions.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : generator of array of shape = [n_samples, n_classes]
            The predicted class probabilities of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
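# Illustrative sketch (not part of scikit-learn): minimal end-to-end usage of
# ``GradientBoostingClassifier`` on a tiny hand-made dataset. Hypothetical demo
# helper, never called by the library; parameter values are arbitrary.
def _demo_gradient_boosting_classifier():
    import numpy as np
    X = np.array([[0.0], [1.0], [2.0], [3.0], [4.0], [5.0]])
    y = np.array([0, 0, 0, 1, 1, 1])
    clf = GradientBoostingClassifier(n_estimators=20, learning_rate=0.1,
                                     max_depth=1, random_state=0)
    clf.fit(X, y)
    predictions = clf.predict(X)          # hard class labels
    probabilities = clf.predict_proba(X)  # per-class probabilities
    return predictions, probabilities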
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
    init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
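# Illustrative sketch (not part of scikit-learn): minimal end-to-end usage of
# ``GradientBoostingRegressor``, including ``staged_predict`` to watch the fit
# improve stage by stage. Hypothetical demo helper, never called by the
# library; parameter values are arbitrary.
def _demo_gradient_boosting_regressor():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 10, size=(50, 1))
    y = np.sin(X.ravel()) + 0.1 * rng.randn(50)
    est = GradientBoostingRegressor(n_estimators=50, learning_rate=0.1,
                                    max_depth=2, loss='ls', random_state=0)
    est.fit(X, y)
    final_pred = est.predict(X)
    staged_mse = [np.mean((y - stage_pred) ** 2)
                  for stage_pred in est.staged_predict(X)]   # one value per stage
    return final_pred, staged_mse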
|
bsd-3-clause
|
hlin117/scikit-learn
|
sklearn/utils/tests/test_linear_assignment.py
|
421
|
1349
|
# Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
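# Illustrative sketch (not part of the test suite): direct use of the private
# ``_hungarian`` helper exercised above. Hypothetical demo, never called by the
# tests; it just recovers the optimal row/column assignment of a small cost
# matrix.
def _demo_hungarian():
    cost = np.array([[4, 1],
                     [2, 3]])
    indexes = _hungarian(cost)                    # array of (row, column) pairs
    total = sum(cost[r, c] for r, c in indexes)   # optimal total cost: 1 + 2 = 3
    return indexes, total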
|
bsd-3-clause
|
pprett/statsmodels
|
statsmodels/graphics/gofplots.py
|
1
|
7297
|
import numpy as np
from scipy import stats
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
from . import utils
__all__ = ['qqplot']
def qqplot(data, dist=stats.norm, distargs=(), a=0, loc=0, scale=1, fit=False,
line=False, ax=None):
"""
qqplot of the quantiles of x versus the quantiles/ppf of a distribution.
Can take arguments specifying the parameters for dist or fit them
automatically. (See fit under kwargs.)
Parameters
----------
data : array-like
1d data array
dist : A scipy.stats or statsmodels distribution
Compare x against dist. The default
is scipy.stats.distributions.norm (a standard normal).
distargs : tuple
A tuple of arguments passed to dist to specify it fully
so dist.ppf may be called.
loc : float
Location parameter for dist
a : float
Offset for the plotting position of an expected order statistic, for
example. The plotting positions are given by (i - a)/(nobs - 2*a + 1)
        for i in range(1, nobs + 1)
scale : float
Scale parameter for dist
fit : boolean
If fit is false, loc, scale, and distargs are passed to the
distribution. If fit is True then the parameters for dist
are fit automatically using dist.fit. The quantiles are formed
from the standardized data, after subtracting the fitted loc
and dividing by the fitted scale.
    line : str {'45', 's', 'r', 'q'} or None
Options for the reference line to which the data is compared.:
- '45' - 45-degree line
- 's' - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- 'r' - A regression line is fit
- 'q' - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
- If True a reference line is drawn on the graph. The default is to
fit a line via OLS regression.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Examples
--------
>>> import statsmodels.api as sm
>>> from matplotlib import pyplot as plt
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> mod_fit = sm.OLS(data.endog, data.exog).fit()
>>> res = mod_fit.resid
>>> fig = sm.qqplot(res)
>>> plt.show()
qqplot against quantiles of t-distribution with 4 degrees of freedom:
>>> import scipy.stats as stats
>>> fig = sm.qqplot(res, stats.t, distargs=(4,))
>>> plt.show()
qqplot against same as above, but with mean 3 and std 10:
>>> fig = sm.qqplot(res, stats.t, distargs=(4,), loc=3, scale=10)
>>> plt.show()
Automatically determine parameters for t distribution including the
loc and scale:
>>> fig = sm.qqplot(res, stats.t, fit=True, line='45')
>>> plt.show()
Notes
-----
Depends on matplotlib. If `fit` is True then the parameters are fit using
the distribution's fit() method.
"""
fig, ax = utils.create_mpl_ax(ax)
if not hasattr(dist, 'ppf'):
raise ValueError("distribution must have a ppf method")
nobs = data.shape[0]
if fit:
fit_params = dist.fit(data)
loc = fit_params[-2]
scale = fit_params[-1]
if len(fit_params)>2:
dist = dist(*fit_params[:-2], **dict(loc = 0, scale = 1))
else:
dist = dist(loc=0, scale=1)
elif distargs or loc != 0 or scale != 1:
dist = dist(*distargs, **dict(loc=loc, scale=scale))
try:
theoretical_quantiles = dist.ppf(plotting_pos(nobs, a))
    except Exception:
raise ValueError('distribution requires more parameters')
sample_quantiles = np.array(data, copy=True)
sample_quantiles.sort()
if fit:
sample_quantiles -= loc
sample_quantiles /= scale
ax.set_xmargin(0.02)
ax.plot(theoretical_quantiles, sample_quantiles, 'bo')
if line:
if line not in ['r','q','45','s']:
msg = "%s option for line not understood" % line
raise ValueError(msg)
qqline(ax, line, theoretical_quantiles, sample_quantiles, dist)
ax.set_xlabel("Theoretical Quantiles")
ax.set_ylabel("Sample Quantiles")
return fig
def qqline(ax, line, x=None, y=None, dist=None, fmt='r-'):
"""
Plot a reference line for a qqplot.
Parameters
----------
ax : matplotlib axes instance
The axes on which to plot the line
line : str {'45','r','s','q'}
Options for the reference line to which the data is compared.:
- '45' - 45-degree line
        - 's' - standardized line, the expected order statistics are scaled
          by the standard deviation of the given sample and have the mean
          added to them
- 'r' - A regression line is fit
- 'q' - A line is fit through the quartiles.
- None - By default no reference line is added to the plot.
x : array
X data for plot. Not needed if line is '45'.
y : array
Y data for plot. Not needed if line is '45'.
dist : scipy.stats.distribution
A scipy.stats distribution, needed if line is 'q'.
Notes
-----
There is no return value. The line is plotted on the given `ax`.
"""
if line == '45':
        end_pts = list(zip(ax.get_xlim(), ax.get_ylim()))  # list() so indexing works on Python 3
end_pts[0] = max(end_pts[0])
end_pts[1] = min(end_pts[1])
ax.plot(end_pts, end_pts, fmt)
return # does this have any side effects?
if x is None and y is None:
raise ValueError("If line is not 45, x and y cannot be None.")
elif line == 'r':
# could use ax.lines[0].get_xdata(), get_ydata(),
# but don't know axes are 'clean'
y = OLS(y, add_constant(x)).fit().fittedvalues
ax.plot(x,y,fmt)
elif line == 's':
m,b = y.std(), y.mean()
ref_line = x*m + b
ax.plot(x, ref_line, fmt)
elif line == 'q':
q25 = stats.scoreatpercentile(y, 25)
q75 = stats.scoreatpercentile(y, 75)
theoretical_quartiles = dist.ppf([.25,.75])
m = (q75 - q25) / np.diff(theoretical_quartiles)
b = q25 - m*theoretical_quartiles[0]
ax.plot(x, m*x + b, fmt)
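# Illustrative sketch (not part of statsmodels): drawing a qq-plot by hand and
# adding a standardized reference line with ``qqline``. Hypothetical demo,
# never called by the library; it assumes matplotlib is available and uses
# ``plotting_pos`` defined just below.
def _demo_qqline():
    import matplotlib.pyplot as plt
    theoretical = stats.norm.ppf(plotting_pos(100, a=0.5))
    sample = np.sort(np.random.normal(loc=1.0, scale=2.0, size=100))
    fig, ax = plt.subplots()
    ax.plot(theoretical, sample, 'bo')
    qqline(ax, 's', x=theoretical, y=sample)   # line scaled by sample std/mean
    return fig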
#about 10x faster than plotting_position in sandbox and mstats
def plotting_pos(nobs, a):
"""
Generates sequence of plotting positions
Parameters
----------
nobs : int
Number of probability points to plot
a : float
Offset for the plotting position of an expected order statistic, for
example.
Returns
-------
plotting_positions : array
The plotting positions
Notes
-----
The plotting positions are given by (i - a)/(nobs - 2*a + 1) for i in
    range(1, nobs + 1)
See also
--------
scipy.stats.mstats.plotting_positions
"""
return (np.arange(1.,nobs+1) - a)/(nobs- 2*a + 1)
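# Illustrative sketch (not part of statsmodels): the plotting positions are
# just evenly spaced probabilities strictly inside (0, 1). Hypothetical demo,
# never called by the library.
def _demo_plotting_pos():
    pos = plotting_pos(5, a=0)      # array([1/6, 2/6, 3/6, 4/6, 5/6])
    assert (pos > 0).all() and (pos < 1).all()
    return pos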
|
bsd-3-clause
|
jpautom/scikit-learn
|
sklearn/metrics/pairwise.py
|
9
|
45248
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
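# A minimal illustrative sketch (added for clarity; the helper name below is
# hypothetical and not part of scikit-learn): checking the dot-product
# expansion ||x - y||^2 = <x, x> - 2 <x, y> + <y, y> that the function above
# relies on.
def _demo_euclidean_expansion():
    import numpy as np
    x = np.array([1.0, 2.0, 3.0])
    y = np.array([4.0, 6.0, 8.0])
    # Direct computation of the Euclidean distance.
    direct = np.sqrt(((x - y) ** 2).sum())
    # The same value via the expansion used by euclidean_distances.
    expanded = np.sqrt(np.dot(x, x) - 2 * np.dot(x, y) + np.dot(y, y))
    assert np.isclose(direct, expanded)
    return direct, expanded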
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
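# A minimal illustrative sketch (hypothetical helper, not part of the original
# module): the batched routine above should agree with the naive full-matrix
# computation mentioned in its docstring.
def _demo_argmin_min_matches_naive():
    import numpy as np
    rng = np.random.RandomState(0)
    X, Y = rng.rand(20, 3), rng.rand(30, 3)
    argmin, dist = pairwise_distances_argmin_min(X, Y, batch_size=7)
    full = pairwise_distances(X, Y, metric="euclidean")
    # Same indices and (up to rounding) same minimal distances as the
    # memory-hungry one-shot computation.
    assert np.array_equal(argmin, full.argmin(axis=1))
    assert np.allclose(dist, full.min(axis=1))
    return argmin, dist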
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
    X : array-like
        Array containing points, of shape (n_samples1, n_features).
    Y : array-like
        Array containing points, of shape (n_samples2, n_features).
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
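# A minimal illustrative sketch (hypothetical helper, not part of the original
# module): cosine_distances is 1 - cosine_similarity, so it ignores vector
# norms and ranges from 0 (same direction) to 2 (opposite directions).
def _demo_cosine_distance_range():
    import numpy as np
    X = np.array([[1.0, 0.0], [-1.0, 0.0], [2.0, 0.0]])
    D = cosine_distances(X)
    assert np.isclose(D[0, 1], 2.0)  # opposite directions
    assert np.isclose(D[0, 2], 0.0)  # same direction, different norm
    return D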
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
    Notes
    -----
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
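# A minimal illustrative sketch (hypothetical helper, not part of the original
# module): checking the note above, i.e. that the paired cosine distance is
# half the squared euclidean distance between L2-normalized samples.
def _demo_paired_cosine_identity():
    import numpy as np
    rng = np.random.RandomState(0)
    X, Y = rng.rand(5, 4), rng.rand(5, 4)
    lhs = paired_cosine_distances(X, Y)
    rhs = 0.5 * ((normalize(X) - normalize(Y)) ** 2).sum(axis=1)
    assert np.allclose(lhs, rhs)
    return lhs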
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
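# A minimal illustrative sketch (hypothetical helper, not part of the original
# module): one entry of the polynomial Gram matrix recomputed directly from
# the definition K(x, y) = (gamma * <x, y> + coef0) ** degree.
def _demo_polynomial_kernel_entry():
    import numpy as np
    X = np.array([[1.0, 2.0], [0.5, -1.0]])
    K = polynomial_kernel(X, degree=3, gamma=0.5, coef0=1)
    expected_01 = (0.5 * np.dot(X[0], X[1]) + 1) ** 3
    assert np.isclose(K[0, 1], expected_01)
    return K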
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
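# A minimal illustrative sketch (hypothetical helper, not part of the original
# module): one entry of the RBF Gram matrix recomputed directly from
# K(x, y) = exp(-gamma * ||x - y||^2).
def _demo_rbf_kernel_entry():
    import numpy as np
    X = np.array([[0.0, 0.0], [1.0, 1.0]])
    K = rbf_kernel(X, gamma=0.5)
    expected_01 = np.exp(-0.5 * ((X[0] - X[1]) ** 2).sum())
    assert np.isclose(K[0, 1], expected_01)
    # Diagonal entries are exp(0) == 1 for any gamma.
    assert np.allclose(np.diag(K), 1.0)
    return K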
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter *dense_output* for sparse output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
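# A minimal illustrative sketch (hypothetical helper, not part of the original
# module): chi2_kernel is the elementwise exponential of gamma times the
# additive chi-squared kernel, which the two public functions make easy to
# verify on small non-negative (histogram-like) inputs.
def _demo_chi2_is_exponentiated_additive():
    import numpy as np
    rng = np.random.RandomState(0)
    X, Y = rng.rand(4, 6), rng.rand(3, 6)
    gamma = 0.5
    assert np.allclose(chi2_kernel(X, Y, gamma=gamma),
                       np.exp(gamma * additive_chi2_kernel(X, Y)))
    return chi2_kernel(X, Y, gamma=gamma)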
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
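# A minimal illustrative sketch (hypothetical helper, not part of the original
# module): pairwise_distances dispatches on the metric string, and a
# "precomputed" square matrix is only validated and returned.
def _demo_pairwise_distances_dispatch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(6, 3)
    D = pairwise_distances(X, metric="manhattan")
    assert D.shape == (6, 6)
    assert np.allclose(np.diag(D), 0.0)
    # Passing the distance matrix back with metric="precomputed" is a no-op
    # apart from the input checks.
    D2 = pairwise_distances(D, metric="precomputed")
    assert np.allclose(D, D2)
    return D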
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel functions.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
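# A minimal illustrative sketch (hypothetical helper, not part of the original
# module): a callable metric passed to pairwise_kernels is evaluated row pair
# by row pair through _pairwise_callable, so a hand-written dot product
# reproduces the built-in linear kernel.
def _demo_pairwise_kernels_callable():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K_linear = pairwise_kernels(X, metric="linear")
    K_callable = pairwise_kernels(X, metric=lambda a, b: np.dot(a, b))
    assert np.allclose(K_linear, K_callable)
    return K_linear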
|
bsd-3-clause
|
zihua/scikit-learn
|
examples/plot_multilabel.py
|
236
|
4157
|
# Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never
more than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
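# A minimal illustrative sketch (hypothetical helper, not part of the original
# example): the line drawn above comes from solving w[0]*x + w[1]*y + b = 0
# for y, i.e. y = -(w[0]*x + b) / w[1].
def _demo_hyperplane_algebra():
    w = np.array([2.0, 4.0])  # made-up coefficient vector
    b = 1.0                   # made-up intercept
    x = 3.0
    y = -(w[0] * x + b) / w[1]
    # The point (x, y) lies exactly on the decision boundary.
    assert np.isclose(w[0] * x + w[1] * y + b, 0.0)
    return y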
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
|
bsd-3-clause
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/matplotlib/tests/test_basic.py
|
5
|
1550
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
from nose.tools import assert_equal
from matplotlib.cbook import MatplotlibDeprecationWarning
from matplotlib.testing.decorators import knownfailureif
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'The finance module has been deprecated in mpl 2',
MatplotlibDeprecationWarning)
from pylab import *
def test_simple():
assert_equal(1 + 1, 2)
@knownfailureif(True)
def test_simple_knownfail():
# Test the known fail mechanism.
assert_equal(1 + 1, 3)
def test_override_builtins():
ok_to_override = set([
'__name__',
'__doc__',
'__package__',
'__loader__',
'__spec__',
'any',
'all',
'sum'
])
# We could use six.moves.builtins here, but that seems
# to do a little more than just this.
if six.PY3:
builtins = sys.modules['builtins']
else:
builtins = sys.modules['__builtin__']
overridden = False
for key in globals().keys():
if key in dir(builtins):
if (globals()[key] != getattr(builtins, key) and
key not in ok_to_override):
print("'%s' was overridden in globals()." % key)
overridden = True
assert not overridden
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
mit
|
idlead/scikit-learn
|
examples/applications/plot_species_distribution_modeling.py
|
254
|
7434
|
"""
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
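# A minimal illustrative sketch (hypothetical helper, not part of the original
# example): np.searchsorted is what maps a coordinate to its grid cell above;
# on a tiny 1-D grid it returns the insertion position that keeps the grid
# sorted.
def _demo_searchsorted_grid_lookup():
    grid = np.arange(0.0, 10.0, 2.0)      # grid edges: 0, 2, 4, 6, 8
    coords = np.array([0.5, 3.9, 8.1])
    idx = np.searchsorted(grid, coords)
    # 0.5 falls after edge 0, 3.9 after edge 2, 8.1 after edge 8.
    assert list(idx) == [1, 2, 5]
    return idx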
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
|
bsd-3-clause
|
jpautom/scikit-learn
|
sklearn/svm/tests/test_bounds.py
|
280
|
2541
|
import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
|
bsd-3-clause
|
0asa/scikit-learn
|
sklearn/ensemble/tests/test_forest.py
|
7
|
30960
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
"""Check consistency on dataset iris."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
"""Check consistency on dataset boston house prices."""
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
"""Regression models should not have a classes_ attribute."""
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
"""Predict probabilities."""
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
"""Check variable importances."""
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_less(0, X_new.shape[1])
        assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
"""Check that oob prediction is a good estimation of the generalization
error."""
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
"""Check that base trees can be grid-searched."""
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
"""Check pickability."""
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
"""Check estimators on multi-output problems."""
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
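# --- Editorial example (not part of the original test suite) ----------------
# Minimal sketch of the multi-output behaviour checked above: predict_proba
# returns one probability array per output. Only the public
# RandomForestClassifier API is assumed.
def _example_multioutput_proba():
    from sklearn.ensemble import RandomForestClassifier
    X_mo = [[0, 0], [1, 1], [0, 1], [1, 0]]
    y_mo = [[0, 1], [1, 0], [0, 0], [1, 1]]  # two binary outputs per sample
    clf = RandomForestClassifier(n_estimators=5, random_state=0).fit(X_mo, y_mo)
    proba = clf.predict_proba(X_mo)
    # one (n_samples, n_classes) array per output
    return len(proba), proba[0].shape  # -> (2, (4, 2))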
def check_classes_shape(name):
"""Test that n_classes_ and classes_ have proper shape."""
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning a dense array.
'''
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning the same array for both argument
values.
'''
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
"""Test precedence of max_leaf_nodes over max_depth. """
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
"""Test if leaves contain more than leaf_count training examples"""
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
"""Check that it works no matter the memory layout"""
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_warm_start(name, random_state=42):
"""Test if fitting incrementally with warm start gives a forest of the
right size and the same results as a normal fit."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
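# --- Editorial example (not part of the original test suite) ----------------
# Minimal sketch of incremental fitting with warm_start, the behaviour
# verified above: raising n_estimators and refitting adds trees instead of
# rebuilding the forest. Public RandomForestClassifier API only.
def _example_warm_start_growth():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import RandomForestClassifier
    data = load_iris()
    clf = RandomForestClassifier(n_estimators=5, warm_start=True,
                                 random_state=0)
    clf.fit(data.data, data.target)   # grows the first 5 trees
    clf.set_params(n_estimators=10)
    clf.fit(data.data, data.target)   # grows 5 more, keeps the first 5
    return len(clf.estimators_)       # -> 10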
def check_warm_start_clear(name):
"""Test if fit clears state and grows a new forest when warm_start==False.
"""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
"""Test if warm start second fit with smaller n_estimators raises error."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
"""Test if warm start with equal n_estimators does nothing and returns the
same forest and raises a warning."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
"""Test that the warm start computes oob score when asked."""
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
if __name__ == "__main__":
import nose
nose.runmodule()
|
bsd-3-clause
|
zzz14/LOST-FOUND
|
userpage/jieba1/test/extract_topic.py
|
65
|
1463
|
import sys
sys.path.append("../")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import decomposition
import jieba
import time
import glob
import sys
import os
import random
if len(sys.argv)<2:
print("usage: extract_topic.py directory [n_topic] [n_top_words]")
sys.exit(0)
n_topic = 10
n_top_words = 25
if len(sys.argv)>2:
n_topic = int(sys.argv[2])
if len(sys.argv)>3:
n_top_words = int(sys.argv[3])
count_vect = CountVectorizer()
docs = []
pattern = os.path.join(sys.argv[1],"*.txt")
print("read "+pattern)
for f_name in glob.glob(pattern):
with open(f_name) as f:
print("read file:", f_name)
for line in f: #one line as a document
words = " ".join(jieba.cut(line))
docs.append(words)
random.shuffle(docs)
print("read done.")
print("transform")
counts = count_vect.fit_transform(docs)
tfidf = TfidfTransformer().fit_transform(counts)
print(tfidf.shape)
t0 = time.time()
print("training...")
nmf = decomposition.NMF(n_components=n_topic).fit(tfidf)
print("done in %0.3fs." % (time.time() - t0))
# Invert the vectorizer vocabulary to be able to map feature indices back to words
feature_names = count_vect.get_feature_names()
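# (Editorial note) topic.argsort()[:-n_top_words - 1:-1] below selects the
# indices of the n_top_words largest component weights in descending order.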
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print("")
|
gpl-3.0
|
loli/semisupervisedforests
|
sklearn/metrics/__init__.py
|
7
|
3258
|
"""
The :mod:`sklearn.metrics` module includes score functions, performance
metrics, pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
]
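# --- Editorial example (not part of the original module) --------------------
# The scorer helpers re-exported above wrap plain metric functions so they can
# be passed to model-selection tools via `scoring=...`. A minimal sketch,
# assuming only `make_scorer` and `fbeta_score` from this package:
#
#     from sklearn.metrics import make_scorer, fbeta_score
#     ftwo_scorer = make_scorer(fbeta_score, beta=2)
#     # ftwo_scorer(estimator, X_val, y_val) returns the F2 score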
|
bsd-3-clause
|
0asa/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
17
|
28933
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimates.
    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.
    det : float
        Log-determinant (as computed by ``fast_logdet``) of the robust
        covariance estimate.
    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.
    dist : array-like, shape (n_samples,)
        Squared Mahalanobis distances of all observations with respect to
        the returned location and covariance estimates.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
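# --- Editorial example (not part of the original module) --------------------
# Minimal sketch of a direct call to the public c_step helper on toy Gaussian
# data; n_support must exceed n_samples / 2. Only names defined in this module
# and numpy are assumed.
def _example_c_step():
    rng = np.random.RandomState(0)
    X_toy = rng.randn(100, 2)
    location, covariance, det, support, dist = c_step(
        X_toy, n_support=60, random_state=0)
    return location, covariance, int(support.sum())  # support.sum() == 60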
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        `[n_samples + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): reduce the number of candidates kept, then
            # re-attempt the allocation.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
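# --- Editorial example (not part of the original module) --------------------
# Minimal sketch of fast_mcd on a sample with planted outliers. As the Notes
# above state, only raw (uncorrected, unreweighted) estimates are returned;
# MinCovDet below adds the correction and re-weighting steps.
def _example_fast_mcd():
    rng = np.random.RandomState(0)
    X_toy = rng.randn(200, 2)
    X_toy[:20] += 10.                  # plant 10% gross outliers
    location, covariance, support, dist = fast_mcd(X_toy, random_state=0)
    # most planted outliers should be excluded from the MCD support
    return location, covariance, int(support[:20].sum())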
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is almost, but not exactly,
        zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        [n_samples + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
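    # (Editorial note) The correction above rescales the raw covariance so
    # that the median of the robust squared Mahalanobis distances matches
    # chi2(n_features).isf(0.5), i.e. the median expected under a Gaussian
    # model; dist_ is divided by the same factor to stay consistent.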
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
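# --- Editorial example (not part of the original module) --------------------
# Minimal sketch of typical MinCovDet usage, contrasting the robust estimate
# with the non-robust EmpiricalCovariance on contaminated data. Only names
# imported or defined in this module are assumed.
def _example_min_cov_det():
    rng = np.random.RandomState(42)
    X_toy = rng.randn(300, 2)
    X_toy[:30] += 8.                   # 10% outliers
    robust = MinCovDet(random_state=0).fit(X_toy)
    classical = EmpiricalCovariance().fit(X_toy)
    # robust.covariance_ stays close to the identity, while the classical
    # estimate is inflated by the planted outliers
    return robust.covariance_, classical.covariance_, int(robust.support_.sum())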
|
bsd-3-clause
|
Mbewu/libmesh
|
doc/statistics/cloc_libmesh.py
|
1
|
7208
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import math
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# git checkout `git rev-list -n 1 --before="$my_date" master`
# cloc.pl src/*/*.C include/*/*.h
data = [
# 2003 - All data from archived svn repo
# '2003-01-10', 158, 29088, # SVN revision 4 - this is the first revision with trunk/libmesh
# '2003-01-20', 184, 28937, # SVN revision 11
# '2003-01-24', 198, 31158, # SVN revision 23
'2003-02-04', 198, 31344, # SVN revision 47
'2003-03-04', 243, 36036,
'2003-04-04', 269, 39946,
'2003-05-04', 275, 40941,
'2003-06-04', 310, 44090,
'2003-07-04', 319, 44445,
'2003-08-04', 322, 45225,
'2003-09-04', 325, 46762,
'2003-10-04', 327, 47151,
'2003-11-04', 327, 47152, # Up to now, all the include files were in the same directory
'2003-12-04', 327, 47184,
# 2004 - All data from archived svn repo
'2004-01-04', 339, 48437,
'2004-02-04', 343, 50455,
'2004-03-04', 347, 52198,
'2004-04-04', 358, 52515,
'2004-05-04', 358, 52653,
'2004-06-04', 369, 53953,
'2004-07-04', 368, 53981,
'2004-08-04', 371, 54316,
'2004-09-04', 371, 54510,
'2004-10-04', 375, 55785,
'2004-11-04', 375, 55880,
'2004-12-04', 384, 56612,
# 2005 - All data from archived svn repo
'2005-01-04', 385, 56674,
'2005-02-04', 406, 61034,
'2005-03-04', 406, 62423,
'2005-04-04', 403, 62595,
'2005-05-04', 412, 63540,
'2005-06-04', 416, 69619,
'2005-07-04', 425, 72092,
'2005-08-04', 425, 72445,
'2005-09-04', 429, 74148,
'2005-10-04', 429, 74263,
'2005-11-04', 429, 74486,
'2005-12-04', 429, 74629,
# 2006 - All data from archived svn repo
'2006-01-04', 429, 74161,
'2006-02-04', 429, 74165,
'2006-03-04', 429, 74170,
'2006-04-04', 429, 74864,
'2006-05-04', 433, 73847,
'2006-06-04', 438, 74681,
'2006-07-04', 454, 76954,
'2006-08-04', 454, 77464,
'2006-09-04', 454, 77843,
'2006-10-04', 454, 78051,
'2006-11-04', 463, 78683,
'2006-12-04', 463, 79057,
# 2007 - All data from archived svn repo
'2007-01-04', 463, 79149,
'2007-02-04', 475, 79344,
'2007-03-04', 479, 81416,
'2007-04-04', 479, 81468,
'2007-05-04', 481, 84312,
'2007-06-04', 481, 85565,
'2007-07-04', 482, 85924,
'2007-08-04', 485, 86248,
'2007-09-04', 487, 86481,
'2007-10-04', 497, 87926,
'2007-11-04', 502, 89687,
'2007-12-04', 512, 93523,
# 2008 - All data from archived svn repo
'2008-01-04', 512, 94263,
'2008-02-04', 515, 94557,
'2008-03-04', 526, 98127,
'2008-04-04', 526, 98256,
'2008-05-04', 531, 99715,
'2008-06-04', 531, 99963,
'2008-07-04', 538, 100839,
'2008-08-04', 542, 101682,
'2008-09-04', 548, 102163,
'2008-10-04', 556, 104185,
'2008-11-04', 558, 104535,
'2008-12-04', 565, 106318,
# 2009 - All data from archived svn repo
'2009-01-04', 565, 106340,
'2009-02-04', 579, 108431,
'2009-03-04', 584, 109050,
'2009-04-04', 584, 109922,
'2009-05-04', 589, 110821,
'2009-06-04', 591, 111094,
'2009-07-04', 591, 111571,
'2009-08-04', 591, 111555,
'2009-09-04', 591, 111746,
'2009-10-04', 591, 111920,
'2009-11-04', 595, 112993,
'2009-12-04', 597, 113744,
# 2010 - All data from archived svn repo
'2010-01-04', 598, 113840,
'2010-02-04', 600, 114378,
'2010-03-04', 602, 114981,
'2010-04-04', 603, 115509,
'2010-05-04', 603, 115821,
'2010-06-04', 603, 115875,
'2010-07-04', 627, 126159,
'2010-08-04', 627, 126217,
'2010-09-04', 628, 126078,
'2010-10-04', 642, 129417,
'2010-11-04', 643, 130045,
'2010-12-04', 648, 131363,
# 2011 - All data from archived svn repo
'2011-01-04', 648, 131644,
'2011-02-04', 648, 132105,
'2011-03-04', 658, 132950,
'2011-04-04', 661, 133643,
'2011-05-04', 650, 133958,
'2011-06-04', 662, 134447,
'2011-07-04', 667, 134938,
'2011-08-04', 679, 136338,
'2011-09-04', 684, 138165,
'2011-10-04', 686, 138627,
'2011-11-04', 690, 141876,
'2011-12-04', 690, 142096,
# 2012
'2012-01-04', 694, 142345,
'2012-02-04', 697, 142585,
'2012-03-04', 703, 146127,
'2012-04-04', 706, 147191,
'2012-05-04', 708, 148202,
'2012-06-04', 705, 148334,
'2012-07-04', 713, 150066,
'2012-08-04', 727, 152269,
'2012-09-04', 725, 152381,
'2012-10-04', 734, 155213, # cloc reports 1092 and 1094 files for Oct/Nov, Don't know what happened...
'2012-11-04', 743, 156082, # We moved from libmesh/src to src around here so maybe that caused it?
'2012-12-04', 752, 156903,
# 2013
'2013-01-04', 754, 158689,
'2013-02-04', 770, 161001,
'2013-03-04', 776, 162189,
'2013-04-04', 783, 162986,
'2013-05-04', 785, 163808,
'2013-06-04', 785, 164022,
'2013-07-04', 789, 163854,
'2013-08-04', 789, 164269,
'2013-09-04', 790, 165129,
'2013-10-04', 790, 165447,
'2013-11-04', 791, 166287,
'2013-12-04', 794, 168772,
# 2014
'2014-01-04', 796, 170174,
'2014-02-04', 796, 170395,
'2014-03-04', 799, 172037,
'2014-04-04', 801, 172262,
'2014-05-04', 806, 173772,
'2014-06-04', 807, 171098,
'2014-07-04', 807, 171220,
'2014-08-04', 808, 172534,
'2014-09-04', 808, 173639,
'2014-10-04', 819, 175750,
'2014-11-04', 819, 176415,
'2014-12-04', 819, 176277,
# 2015
'2015-01-04', 819, 176289,
]
# Extract the dates from the data array
date_strings = data[0::3]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%Y-%m-%d')))
# Extract number of files from data array
n_files = data[1::3]
# Extract number of lines of code from data array
n_lines = data[2::3]
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax1 = fig.add_subplot(111)
ax1.plot(date_nums, n_files, 'bo-')
ax1.set_ylabel('Files (blue circles)')
# Set up x-tick locations
ticks_names = ['2003-03-04', '2007-03-04', '2011-03-04', '2015-01-04']
# Get numerical values for the names
tick_nums = []
for x in ticks_names:
tick_nums.append(date2num(datetime.strptime(x, '%Y-%m-%d')))
# Set tick labels and positions
ax1.set_xticks(tick_nums)
ax1.set_xticklabels(ticks_names)
# Use the twinx() command to plot more data on the other axis
ax2 = ax1.twinx()
ax2.plot(date_nums, np.divide(n_lines, 1000.), 'gs-')
ax2.set_ylabel('Lines of code in thousands (green squares)')
# Create linear curve fits of the data
files_fit = np.polyfit(date_nums, n_files, 1)
lines_fit = np.polyfit(date_nums, n_lines, 1)
# Convert to files/month
files_per_month = files_fit[0]*(365./12.)
lines_per_month = lines_fit[0]*(365./12.)
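# (Editorial note) date2num returns dates as floating-point days, so the
# polyfit slopes above are per-day rates; multiplying by 365/12 converts them
# to an approximate per-month rate.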
# Print curve fit data on the plot
files_msg = 'Approx. ' + '%.1f' % files_per_month + ' files added/month'
lines_msg = 'Approx. ' + '%.1f' % lines_per_month + ' lines added/month'
ax1.text(date_nums[len(date_nums) // 4], 300, files_msg)
ax1.text(date_nums[len(date_nums) // 4], 250, lines_msg)
# Save as PDF
plt.savefig('cloc_libmesh.pdf', format='pdf')
|
lgpl-2.1
|
yaukwankiu/armor
|
observe.py
|
1
|
4922
|
"""
== USE ==
from armor import pattern
from armor.observe import window as obsw
a = pattern.a
b = pattern.b
lowerLeft = (250, 200)
windowSize = (100, 100)
x = obsw(a, b, 250, 200, 100, 100, display=True, toFolder='testing/test114/log.1.txt')
== RESULTS ==
"""
######################################
# imports
import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt
from armor import pattern
from armor.shiiba import regression2 as regression
from armor.shiiba import regression3 as reg
from armor.shiiba import regressionCFLfree as cflfree
from testing.test112 import test112 as test
from armor.advection import semiLagrangian
sl = semiLagrangian
lsq = np.linalg.lstsq
import os
from imp import reload
import time
import pickle
time0= time.time()
def tic():
global timeStart
timeStart = time.time()
def toc():
print "time spent:", time.time()-timeStart
dbz=pattern.DBZ
def window(a, b, bottom, left, height=100, width=100, searchWindowHeight=9,\
searchWindowWidth=9, display=True, toFolder=''):
# 1. create folder a.outputFolder/observation/time
# 2. create windows aa bb and write image for aa, bb, bb-aa
# 3. regress and write result
# 4. get prediction and write image
if toFolder=="":
#toFolder = a.outputFolder + "bottom%d_left%d_height_%d_width_%d_searchHeight%d_searchWidth_%d" % (bottom,left,height,width, searchWindowHeight, searchWindowWidth)
toFolder = a.outputFolder + str(int(time.time()) % 100000)
if toFolder!=None and toFolder!=False:
try:
os.makedirs(toFolder)
print a.outputFolder, "folder created!"
except OSError:
print a.outputFolder, "exists!"
raise OSError
# initialise the output string
outputFolder = toFolder #alias
output = time.asctime()
output += "\narmor.observe.window: \na = " + a.name + ", b = " + b.name
output += "\nwindow: bottom=%d, left=%d, height=%d, width=%d" % (bottom, left, height, width)
# create the windows and save
aa = a.getWindow(bottom, left, width, height)
bb = b.getWindow(bottom, left, width, height)
diff = (bb-aa)
aa.imagePath = outputFolder + 'a.window%d.png' % height
bb.imagePath = outputFolder + 'b.window%d.png' % height
aa.saveImage()
bb.saveImage()
# compute basic properties
output += "mean of a.window = %f; mean of b.window = %f" %(aa.matrix.mean(),bb.matrix.mean())
output += "Number of data points: a.window: %d, b.window %d" %\
( (1-aa.matrix.mask).sum(), (1-bb.matrix.mask).sum() )
output += "common region: %d" % (1- (aa.matrix.mask+bb.matrix.mask)).sum()
corr = aa.corr(bb)[0,1]
output += "\nCorrelation: %f; Correlation-squared: %f" % (corr, corr**2)
# regress
regressionResults = cflfree.regressLocal(a=aa, b=b, gridSize=5,bottom=bottom,left=left,
height=height,width=width,\
searchWindowHeight=searchWindowHeight,\
searchWindowWidth=searchWindowWidth,\
display=False)
mn = [v[0] for v in regressionResults]
C = [v[1] for v in regressionResults]
Rsquared = [v[2] for v in regressionResults]
output += "\nTop results from CFL-relaxed regression (search window height: %d, width: %d)" %\
(searchWindowHeight, searchWindowWidth)
output += "\n(m, n),\tRsquared,\tc1,...,c9"
for v in regressionResults[:12]:
output += "\n(%d,%d),\t%f, %f %f %f %f %f %f %f %f %f" % (v[0][0],v[0][1],v[2],\
v[1][0], v[1][1],v[1][2],v[1][3],v[1][4],v[1][5],v[1][6],v[1][7],\
v[1][8])
#get prediction
(m,n), C, Rsquared = regressionResults[0]
aa1 = getPrediction(C, aa)
bb1 = b.getWindow(bottom=bottom+m, left=left+n, height=height, width=width)
diff = bb1-aa1
aa1.imagePath = outputFolder + "aa1.window.shiiba.prediction.png"
bb1.imagePath = outputFolder + "bb1.window.shiiba.data.png"
diff.imagePath = outputFolder + "bb1-aa1.window.shiiba.data.png"
aa1.saveImage()
bb1.saveImage()
diff.saveImage()
# compute correlation
corr = aa1.corr(bb1)[0,1]
output += "\nCorrelation between prediction and data in the window: %f; Correlation-squared: %f" % (corr, corr**2)
# output
if display:
print output
open(toFolder+'.log.txt', 'w').write(output)
return output
def main():
a=pattern.a
b=pattern.b
a.outputFolder = "testing/test114/observations0200/"
return window(a=a,b=b, bottom=250, left=250, height=100, width=100,\
searchWindowHeight=7, searchWindowWidth=17, display=True, toFolder='')
if __name__ == '__main__':
main()
|
cc0-1.0
|
mlskit/astromlskit
|
LDA/ldaffront.py
|
2
|
4952
|
from PyQt4 import QtCore, QtGui
from lda import *
import pylab as pl
import numpy as np
import sys
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(248, 395)
self.d=2
self.pushButton_3 = QtGui.QPushButton(Form)
self.pushButton_3.setGeometry(QtCore.QRect(50, 350, 161, 23))
self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
self.pushButton = QtGui.QPushButton(Form)
self.pushButton.setGeometry(QtCore.QRect(50, 290, 161, 23))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton.clicked.connect(self.takeinput)
self.pushButton_2 = QtGui.QPushButton(Form)
self.pushButton_2.setGeometry(QtCore.QRect(50, 320, 161, 23))
self.pushButton_2.setObjectName(_fromUtf8("pushButton_2"))
self.pushButton_2.clicked.connect(self.takeoutput)
self.groupBox_5 = QtGui.QGroupBox(Form)
self.groupBox_5.setGeometry(QtCore.QRect(20, 10, 221, 61))
self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
self.lineEdit_2 = QtGui.QLineEdit(self.groupBox_5)
self.lineEdit_2.setGeometry(QtCore.QRect(40, 20, 141, 20))
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.groupBox_2 = QtGui.QGroupBox(Form)
self.groupBox_2.setGeometry(QtCore.QRect(20, 90, 221, 80))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.label = QtGui.QLabel(self.groupBox_2)
self.label.setGeometry(QtCore.QRect(30, 20, 111, 16))
self.label.setObjectName(_fromUtf8("label"))
self.checkBox = QtGui.QCheckBox(self.groupBox_2)
self.checkBox.setGeometry(QtCore.QRect(30, 50, 171, 17))
self.checkBox.setObjectName(_fromUtf8("checkBox"))
self.spinBox = QtGui.QSpinBox(self.groupBox_2)
self.spinBox.setGeometry(QtCore.QRect(150, 20, 42, 22))
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.spinBox.valueChanged.connect(self.setd)
self.textEdit = QtGui.QTextEdit(Form)
self.textEdit.setGeometry(QtCore.QRect(20, 180, 221, 101))
self.textEdit.setObjectName(_fromUtf8("textEdit"))
self.pushButton_3.clicked.connect(self.startlda)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.pushButton_3.setText(_translate("Form", "Start", None))
self.pushButton.setText(_translate("Form", "Input File", None))
self.pushButton_2.setText(_translate("Form", "Output Folder", None))
self.groupBox_5.setTitle(_translate("Form", "Learner/Classifier Name", None))
self.lineEdit_2.setText(_translate("Form", "LDA", None))
self.groupBox_2.setTitle(_translate("Form", "Options", None))
self.label.setText(_translate("Form", "Dimensionality", None))
self.checkBox.setText(_translate("Form", "Print/plot", None))
def setd(self):
self.d=self.spinBox.value()
print self.d
def takeinput(self):
fname = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:')
print type(fname)
import pandas as pd
try:
df = pd.read_csv(str(fname), sep=",")
except:
sys.exit(0)
x=list(df[list(df)[0]])
y=list(df[list(df)[1]])
self.classlabels=list(df[list(df)[2]])
self.tr=(zip(x,y))
print self.tr
def takeoutput(self):
print "output Taken"
return
fname = QtGui.QFileDialog.getOpenFileName(None, 'Open file', 'C:')
print type(fname)
import pandas as pd
df = pd.read_csv(str(fname), sep=",")
x=list(df[list(df)[0]])
y=list(df[list(df)[1]])
#print x,y
self.te=(zip(x,y))
#print (self.te)
#print len(np.array(self.te).shape)
def startlda(self):
data=np.array(self.tr)
labels=np.array(self.classlabels)
newData,w = lda(data,labels,self.d)
print newData,w
out=open("output.txt","w+")
print>>out,newData,w
pl.plot(data[:,0],data[:,1],'o',newData[:,0],newData[:,0],'.')
pl.show()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Form()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
|
gpl-3.0
|
srikarym/eigenfaces
|
cnn/cnn1.py
|
1
|
4594
|
# coding: utf-8
# In[1]:
#get_ipython().magic(u'matplotlib inline')
from time import time
import logging
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import fetch_lfw_people
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn import manifold
from sklearn.decomposition import FastICA
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
# In[2]:
# print X_train.shape,h,w
# lyt = X[0]
# lyt = lyt.reshape((h,w))
# plt.imshow(lyt,'gray')
# print X_train.shape
import cv2
from keras.utils import np_utils
print h,w
X_train_reshaped = np.zeros((X_train.shape[0],1,32,32))
for i in xrange(X_train.shape[0]):
X_train_reshaped[i,:,:] = cv2.resize(X_train[i].reshape((h,w)),(32,32))
print X_train_reshaped.shape
X_test_reshaped = np.zeros((X_test.shape[0],1,32,32))
for i in xrange(X_test.shape[0]):
X_test_reshaped[i,:,:] = cv2.resize(X_test[i].reshape((h,w)),(32,32))
print X_test_reshaped.shape
#plt.imshow(X_test_reshaped[0,0])
Y_train = np_utils.to_categorical(y_train)
Y_test = np_utils.to_categorical(y_test)
# In[3]:
count = np.zeros(7)
for i in xrange(len(y)):
count[y[i]] = count[y[i]] + 1
print count
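# Inverse-frequency class weights: LFW identities with fewer images receive proportionally larger weights.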
class_weight = {0 : 1.0/count[0],
1: 1.0/count[1],
2: 1.0/count[2],
3: 1.0/count[3],
4: 1.0/count[4],
5: 1.0/count[5],
6: 1.0/count[6]}
# In[ ]:
import keras
from keras.callbacks import ModelCheckpoint
from keras.models import model_from_json
from keras.optimizers import SGD
from keras.constraints import maxnorm
from keras import backend as K
K.set_image_dim_ordering('th')
import json
model = keras.models.Sequential()
model.add(keras.layers.convolutional.Convolution2D(8,3,3,input_shape=(1, 32, 32),border_mode='same',activation='relu',W_constraint=maxnorm(3)))
model.add(keras.layers.convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.Dropout(0.35))
#model.add(keras.layers.convolutional.Convolution2D(8,3,3,activation='relu',border_mode='same',W_constraint=maxnorm(3)))
#model.add(keras.layers.convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(40,activation='relu',W_constraint=maxnorm(3)))
model.add(keras.layers.Dense(7,activation='softmax'))
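# The 7-way softmax output matches the 7 LFW identities selected by min_faces_per_person=70 above.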
json_txt = model.to_json()
# print json_txt
with open('model4.json','w') as outfile:
json.dump(json_txt,outfile)
outfile.close()
epochs = 50
# opt = SGD(lr=lrate,momentum=0.9,decay=decay,nesterov=False)
# opt = SGD(lr=lrate,momentum=0.9,decay=decay,nesterov=True)
# opt = keras.optimizers.Adagrad(lr=0.01, epsilon=1e-08, decay=0.0)
opt = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)
model.compile(loss='categorical_crossentropy', optimizer=opt,class_weight = class_weight, metrics=['accuracy'])
filepath="model_weights4.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
# model.load_weights("model_weights4.hdf5")
# opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
# opt = keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
# opt = keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
model.fit(X_train_reshaped, Y_train,validation_data=(X_test_reshaped,Y_test),callbacks=callbacks_list, nb_epoch=200, batch_size=32,verbose = 1)
# In[ ]:
model.load_weights(filepath)
(loss, accuracy) = model.evaluate(X_test_reshaped,Y_test,verbose=0)
print 'test accuracy : ',accuracy
|
mit
|
srome/srome.github.io
|
files/nfl_optimizer/nfl_optimizer.py
|
1
|
3148
|
from gurobipy import *
import numpy as np
import pandas as pd
points = "Points"
salary = "Salary"
position = "Position"
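# The constraints below encode a daily-fantasy NFL lineup (a DraftKings-style roster, judging by the
# limits): exactly 9 players, 1 QB, 2-3 RB, 3-4 WR, 1-2 TE, 1 DST, and a 50000 salary cap, with the
# objective maximizing projected points.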
def optimize(player_names, player_data):
varbs = {} # key: player name, value : var
m = Model()
for p in player_names:
varbs[p] = m.addVar(vtype=GRB.BINARY, name = p)
m.update()
all_const = LinExpr()
for p in player_names:
all_const.addTerms(1,varbs[p])
all_const = all_const == 9
# QB
qb_const = LinExpr()
for p in player_names:
if player_data.loc[p, position] == "QB":
qb_const.addTerms(1,varbs[p])
qb_const = qb_const == 1
# WRs
wr_const = LinExpr()
for p in player_names:
if player_data.loc[p, position] == "WR":
wr_const.addTerms(1,varbs[p])
wr2_const = 3 <= wr_const.copy()
wr_const = wr_const <= 4
rb_const = LinExpr()
for p in player_names:
if player_data.loc[p, position] == "RB":
rb_const.addTerms(1,varbs[p])
rb2_const = 2 <= rb_const.copy()
rb_const = rb_const <= 3
te_const = LinExpr()
for p in player_names:
if player_data.loc[p, position] == "TE":
te_const.addTerms(1,varbs[p])
te2_const = te_const.copy()
te2_const = 1 <= te2_const
te_const = te_const <= 2
dst_const = LinExpr()
for p in player_names:
if player_data.loc[p, position] == "DST":
dst_const.addTerms(1,varbs[p])
dst_const = dst_const == 1
sal_const = LinExpr()
for p in player_names:
if type(player_data.loc[p, position]) == type(''):
sal_const.addTerms(player_data.loc[p,salary],varbs[p])
sal_const = sal_const <= 50000
m.addConstr(all_const, "team number constraint")
m.addConstr(qb_const, "qb constraint")
m.addConstr(wr_const, "wr constraint")
m.addConstr(wr2_const, "wr2 constraint")
m.addConstr(rb_const, "rb constraint")
m.addConstr(rb2_const, "rb2 const")
m.addConstr(te_const, "te constraint")
m.addConstr(te2_const, "te 2 constraint")
m.addConstr(dst_const, "dst constraint")
m.addConstr(sal_const, 'salary constraint')
# Objective
obj = LinExpr()
for p in player_names:
if type(player_data.loc[p, points]) == np.float64:
obj.add(varbs[p], player_data.loc[p, points])
m.setObjective(obj, sense= GRB.MAXIMIZE)
m.update()
m.optimize()
sal = 50000
team = []
for v in m.getVars():
if v.x ==1:
sal -= player_data.loc[v.varName, salary]
print('%s %g %s' % (v.varName, player_data.loc[v.varName, points], player_data.loc[v.varName, position]))
team += [v.varName]
print("Salary leftover: " + str(sal))
print('Obj: %g' % m.objVal)
def main():
player_data = pd.read_csv('player_input.csv', index_col=0 )
player_data[points] = player_data[points].astype(np.float64) # Fixes issue around data types
optimize(player_data.index, player_data)
if __name__ == '__main__':
main()
|
mit
|
chrsrds/scikit-learn
|
examples/applications/plot_out_of_core_classification.py
|
11
|
13650
|
"""
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from html.parser import HTMLParser
from urllib.request import urlretrieve
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
# --------------------------------
#
class ReutersParser(HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
sys.stdout.write(
'\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb))
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
sys.stdout.write('\r')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
alternate_sign=False)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(max_iter=5, tol=1e-3),
'Perceptron': Perceptron(tol=1e-3),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(tol=1e-3),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [('{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
# ------------
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = [stats['total_fit_time']
for cls_name, stats in sorted(cls_stats.items())]
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
plt.setp(plt.xticks()[1], rotation=30)
autolabel(rectangles)
plt.tight_layout()
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
chris-wood/SCoNet
|
ns-3-dev/src/core/examples/sample-rng-plot.py
|
188
|
1246
|
# -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
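# NormalVariable takes (mean, variance); a variance of 225 gives sigma = 15, matching the plot annotation below.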
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
|
gpl-2.0
|
eryueniaobp/contest
|
Tianchi_AI_Factory/train.py
|
1
|
13576
|
# encoding=utf-8
import lightgbm as lgb
from sklearn.datasets import load_svmlight_file
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_selection import RFE
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import PolynomialFeatures,StandardScaler,MaxAbsScaler
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor,ExtraTreesRegressor
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import argparse
from sklearn import svm
import numpy as np
from collections import namedtuple
from sklearn import linear_model
import logging,json, simplejson
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='train.log',
filemode='a')
def lgbcv():
"""
train.cls.txt
-0.035534853456
{'num_leaves': 16, 'learning_rate': 0.02, 'min_child_samples': 20}
train.txt
-0.0349368112809
{'num_leaves': 8, 'learning_rate': 0.05, 'min_child_samples': 60}
:return:
"""
X, y = load_svmlight_file('train.txt')
param_grid = {
'learning_rate': [0.02, 0.05, 0.1],
'num_leaves': [8, 16],
'min_child_samples': [5,10,20,60,80],
}
estimator = lgb.LGBMRegressor(objective='mse',n_estimators=100)
gbm = GridSearchCV(estimator, param_grid, verbose=2, scoring='neg_mean_squared_error', cv=5)
gbm.fit(X,y)
logging.info( "lgbcv {0}".format(gbm.best_score_ ))
logging.info( "lgbcv {0}".format(gbm.best_params_))
def zoocv():
X, y = load_svmlight_file('train.txt')
Zoo = namedtuple('zoo', ['name','flag', 'param_grid', 'reg'],verbose=2)
zoo = [
Zoo('ridge', False, {'alpha': [0,0.5]}, linear_model.Ridge(alpha=0.5)),
# Zoo('ridge with scaler', True, {'alpha':[0,0.5,1]}, Pipeline(
# [
# ('scaler', MaxAbsScaler()), ('ridge', linear_model.Ridge())
# ])
# ),
Zoo('ridge with scaler', True, {'alpha':[0,0.5,1]}, linear_model.Ridge()),
Zoo('svr', False, {'C': [1,10,100,1000] } , svm.SVR(kernel='rbf', C=1e3)),
Zoo('svr with scaler', True, {'C': [1, 10,100,1000]}, svm.SVR()),
Zoo('gbdt', False, {'n_estimators':[60, 100,200] , 'max_depth':[4,6] , 'learning_rate': [0.05,0.1] } , GradientBoostingRegressor(n_estimators=100, max_depth=6,loss='ls')),
Zoo('rf', False, {'n_estimators':[100,200,600], 'max_depth':[4,6,8]}, RandomForestRegressor(n_estimators=100, max_depth=6,max_features='auto')) ,
Zoo('extraRF',False, {'n_estimators':[100,200,600], 'max_depth':[4,6,8]}, ExtraTreesRegressor(n_estimators=100, max_depth=6 , max_features='auto')),
Zoo('lgb',False, {'n_estimators':[100,200,600], 'num_leaves':[6,8,16],'learning_rate': [0.05,0.1] } , lgb.LGBMRegressor(n_estimators=100, num_leaves=8 , objective='mse'))
]
for name , flag, param_grid, reg in zoo:
if flag:
X_g = X.toarray()
X_g = MaxAbsScaler().fit(X_g).transform(X_g)
else:
X_g = X
gs = GridSearchCV(estimator=reg, param_grid=param_grid, scoring='neg_mean_squared_error', verbose=2 ,cv = 5)
gs.fit(X_g,y)
logging.info('zoo {0} best_result = {1} best_param = {2}'.format(name, gs.best_score_, gs.best_params_))
def trainlincv():
"""
'mean_test_score': array([-0.03932149]),
{'rank_test_score': array([1], dtype=int32),
'split4_test_score': array([-0.0461944]),
'mean_train_score': array([-0.02850378]),
'split0_train_score': array([-0.0309599]),
'std_test_score': array([ 0.00681591]),
'std_train_score': array([ 0.00131567]),
'split1_train_score': array([-0.02703828]),
'split0_test_score': array([-0.03057691]),
'mean_test_score': array([-0.03932149]),
'split3_train_score': array([-0.02786938]),
'split2_train_score': array([-0.02825937]),
'std_score_time': array([ 3.78773219e-05]),
'params': [{'alpha': 0.5}],
'std_fit_time': array([ 0.6320562]),
'split4_train_score': array([-0.02839197]),
'split2_test_score': array([-0.03484985]),
'split3_test_score': array([-0.03664231]),
'mean_score_time': array([ 0.00142102]),
'mean_fit_time': array([ 6.05255213]),
'param_alpha': masked_array(data = [0.5],mask = [False],fill_value = ?),
'split1_test_score': array([-0.04834397])
}
:return:
"""
X, y = load_svmlight_file('train.txt')
    X = X.toarray()  # after toarray() or todense(), CV MSE becomes very high: the matrix is ill-conditioned.
    X = X[:,56:]  # even after keeping only these columns, CV MSE is still very high.
# scaler = StandardScaler().fit(X)
#scaler = MaxAbsScaler().fit(X)
# X = scaler.transform(X)
#y = np.array(y).reshape((len(y),1))
#scaler = StandardScaler().fit(y)
#y = scaler.transform(y)
estimator = linear_model.Ridge(alpha=0.5)
param_grid = {
'alpha': [0,0.5]
#'alpha': [1e4,2e4,3e4,4e4,5e4,6e4,7e4,8e4]
}
gbm = GridSearchCV(estimator, param_grid,verbose=2,scoring='neg_mean_squared_error',cv=5)
gbm.fit(X,y)
logging.info('param:score = {0}:{1}'.format(gbm.best_params_ ,gbm.best_score_))
print (gbm.best_params_)
print (gbm.best_score_)
print (gbm.cv_results_)
def predict_with_leaf(lgb, linr , sample , xlsx, tag):
X_test, y_test = load_svmlight_file(sample)
X_leaf = lgb.apply(X_test)
y_pred = linr.predict(X_leaf)
df = pd.read_excel(xlsx, header=0)
pd.DataFrame({'id': df['ID'].values, 'score': y_pred})\
.to_csv(sample + '.leafpred.{0}.csv'.format(tag),
index=False,header=False)
def lgblin():
"""
    Leaf nodes + Linear Regression
:return:
"""
X , y = load_svmlight_file("train.txt")
params = {
'objective': 'mse',
'num_leaves': 8,
'learning_rate': 0.05,
        'min_child_samples': 60,  # fairly critical for this problem.
# 'subsample': 0.9,
'n_estimators': 100,
'silent': False,
}
gbm = lgb.LGBMRegressor(**params)
gbm.fit(X,y , eval_metric='mse', eval_set=[(X,y)])
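    # GBDT leaf encoding: apply() is expected to return, for each sample, the index of the leaf it
    # falls into in every tree; those leaf indices then serve as features for the ridge model below.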
X_leaf = gbm.apply(X)
ridge = linear_model.Ridge(alpha=0.5)
ridge.fit(X_leaf, y)
mse = mean_squared_error(y , ridge.predict(X_leaf))
logging.info('leaf mse = {0}'.format(mse))
predict_with_leaf(gbm, ridge, 'testA.txt' , 'testA.xlsx' , '')
predict_with_leaf(gbm, ridge, 'testB.txt', 'testB.xlsx', '')
def rf():
X, y = load_svmlight_file('train.txt')
reg = RandomForestRegressor(n_estimators=600, max_depth=8,max_features='auto',verbose=2)
reg.fit(X,y)
logging.info('rf mse = ' + str(mean_squared_error(y, reg.predict(X))))
predict(reg, 'testA.txt', 'testA.xlsx', 'rf')
predict(reg, 'testB.txt', 'testB.xlsx', 'rf')
def linrfe():
"""
    To finish the computation quickly, step=xx needs to be set relatively large.
ridge : 0.28+
ridge + RFE: 0.28+
    Online, however, the score is 0.045; this offline test appears to be completely unreliable.
"""
X, y = load_svmlight_file('train.txt')
X = X.toarray()
scaler = StandardScaler().fit(X)
X = scaler.transform(X)
reg = linear_model.Ridge(alpha=0.5)
reg.fit(X,y)
print 'r^2=', reg.score(X,y)
print 'train mse = ', mean_squared_error(y, reg.predict(X))
rfe = RFE(estimator=reg, n_features_to_select=500, step=1000,verbose=2)
rfe.fit(X, y)
print 'rfe r^2 = ' , rfe.score(X, y)
print 'rfe mse =' , mean_squared_error(y, rfe.predict(X))
X_rfe = rfe.transform(X)
poly = PolynomialFeatures(degree=2, interaction_only=True)
    X_poly = poly.fit_transform(X_rfe)  # transforming the full feature set directly raises MemoryError
param_grid = {'alpha' :[0.5,1,10,100,1000,1e4,3e4]}
gbm = GridSearchCV(reg, param_grid,verbose=2,scoring='neg_mean_squared_error',cv=5)
gbm.fit(X_poly, y)
logging.info('after rfe poly, best_result = {0}'.format(gbm.best_score_))
logging.info('after rfe poly, best_param= {0}'.format(gbm.best_params_))
#mse = reg.score(X_poly, y)
#print 'after poly ' ,mean_squared_error(y, reg.predict(X_poly))
#logging.info('rfe r^2 score= ' + str(mse) )
params = {
'objective': 'mse',
'num_leaves': 8,
'learning_rate': 0.05,
        'min_child_samples': 60,  # fairly critical for this problem.
# 'subsample': 0.9,
'n_estimators': 100,
'silent': False,
}
gbm = lgb.LGBMRegressor(**params)
gbm.fit(X_poly, y, eval_metric='mse', eval_set=[(X_poly,y)])
    logging.info('train lgb of poly = {0}'.format(mean_squared_error(y, gbm.predict(X_poly))))
# X = rfe.transform(X)
# logging.info('begin to predict')
# predict(rfe, 'testA.txt', 'testA.xlsx', 'ridge.rfe')
# predict(rfe, 'testB.txt', 'testB.xlsx', 'ridge.rfe')
def trainlinear():
X, y = load_svmlight_file('train.txt')
X = X.toarray()
estimators = [('MaxAbs', MaxAbsScaler()), ('ridge', linear_model.Ridge(alpha=0.5))]
pipe = Pipeline(estimators)
pipe.fit(X,y)
y_pred = pipe.predict(X)
logging.info('trainlinear mse = ' + str(mean_squared_error(y,y_pred)))
predict(pipe, 'testA.txt', 'testA.xlsx', 'ridge')
predict(pipe, 'testB.txt', 'testB.xlsx', 'ridge')
def trainlinmap():
"""
mse ~ alpha
    Probe the effect of alpha on the overall MSE: it has almost no effect.
INFO alpha:mse = 0:0.0289213035695
Fri, 22 Dec 2017 10:47:24 train.py[line:49] INFO alpha:mse = 1:0.0289152061282
Fri, 22 Dec 2017 10:47:34 train.py[line:49] INFO alpha:mse = 0.5:0.0289196872644
Fri, 22 Dec 2017 10:47:43 train.py[line:49] INFO alpha:mse = 0.1:0.0289158636722
Fri, 22 Dec 2017 10:47:52 train.py[line:49] INFO alpha:mse = 0.01:0.0289159148539
Fri, 22 Dec 2017 10:48:02 train.py[line:49] INFO alpha:mse = 0.001:0.0289158416954
Fri, 22 Dec 2017 10:48:11 train.py[line:49] INFO alpha:mse = 0.0001:0.028912850027
:return:
"""
X, y = load_svmlight_file('train.txt')
pbuf = [0, 1, 0.5, 0.1, 0.01 ,0.001,0.0001]
buf = []
for alpha in pbuf:
reg = linear_model.Ridge(alpha=alpha)
reg.fit(X,y)
y_pred = reg.predict(X)
buf.append(mean_squared_error(y,y_pred))
logging.info('alpha:mse = {0}:{1}'.format(alpha, buf[-1]))
pd.DataFrame({'alpha': pbuf , 'mse': buf}).to_csv('linmap.csv',index=False)
plt.plot(pbuf, buf)
plt.show()
# predict(reg, 'testA.txt', 'testA.xlsx', 'ridge')
# predict(reg, 'testB.txt', 'testB.xlsx', 'ridge')
def trainsvm():
"""
    Amounts to a mean estimate; completely unusable.
:return:
"""
X, y = load_svmlight_file('train.txt')
X = X.toarray()
pipe = Pipeline([("scaler", MaxAbsScaler()) , ('svm', svm.SVR(C=1))])
"""
kernel = rbf is default .
"""
# clf = svm.SVR(kernel='rbf',C=1)
pipe.fit(X,y)
print mean_squared_error(y, pipe.predict(X))
predict(pipe, 'testA.txt', 'testA.xlsx','svm')
predict(pipe, 'testB.txt', 'testB.xlsx','svm')
def predict(gbm, sample , xlsx, tag=''):
X_test ,y_test = load_svmlight_file(sample)
X_test = X_test.toarray()
y_pred = gbm.predict(X_test)
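    # Cap predictions at 4 (presumably the target's known upper bound in this competition).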
y_pred = np.array([ 4 if i > 4 else i for i in y_pred ])
df = pd.read_excel(xlsx,header=0)
print len(y_pred)
print y_pred
print len(df['ID'].values)
pd.DataFrame({'id': df['ID'].values , 'score': y_pred}).to_csv( sample+'.pred.{0}.csv'.format(tag), index=False,header=False)
def main():
params = {
'objective': 'mse',
'num_leaves': 8,
'learning_rate': 0.05,
        'min_child_samples': 60,  # fairly critical for this problem.
# 'subsample': 0.9,
'n_estimators': 100,
'silent': False,
}
params = {
'objective': 'mse',
'num_leaves': 16,
'learning_rate': 0.02,
        'min_child_samples': 20,  # fairly critical for this problem.
# 'subsample': 0.9,
'n_estimators': 100,
'silent': False,
}
gbm = lgb.LGBMRegressor(**params)
train_txt = 'train.cls.txt'
testA_txt= 'testA.cls.txt'
testB_txt= 'testB.cls.txt'
tag = 'lgb.cls'
X, y = load_svmlight_file(train_txt)
X_train , y_train , X_vldt, y_vldt = train_test_split(X,y)
print 'begin to fit'
gbm.fit(X,y,eval_metric='mse', eval_set=[(X,y)])
predict(gbm, testA_txt, 'testA.xlsx',tag)
predict(gbm, testB_txt, 'testB.xlsx',tag)
imp = pd.DataFrame({
'fid': range(1, len(gbm.feature_importances_) +1 ) ,
'imp': gbm.feature_importances_
})
fidmap = pd.read_csv('fid.map',header=0)
sns.kdeplot(gbm.feature_importances_)
pd.merge(fidmap, imp, on ='fid').to_csv('fea.imp',index=False)
# plt.plot(gbm.feature_importances_)
#
#
# plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--method", default='lgb')
args = parser.parse_args()
if args.method == 'lgb':
main()
elif args.method == 'zoo':
zoocv()
elif args.method == 'lgbcv':
lgbcv()
elif args.method == 'linrfe':
linrfe()
elif args.method == 'rf':
rf()
elif args.method == 'lgblin':
lgblin()
elif args.method == 'svm':
trainsvm()
elif args.method == 'linear':
trainlinear()
elif args.method == 'lincv':
trainlincv()
elif args.method == 'linmap':
trainlinmap()
else:
pass
|
apache-2.0
|
TuSimple/mxnet
|
example/gluon/kaggle_k_fold_cross_validation.py
|
26
|
6854
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This example provides an end-to-end pipeline for a common Kaggle competition.
# The entire pipeline includes common utilities such as k-fold cross validation
# and data pre-processing.
#
# Specifically, the example studies the `House Prices: Advanced Regression
# Techniques` challenge as a case study.
#
# The link to the problem on Kaggle:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques
import numpy as np
import pandas as pd
from mxnet import autograd
from mxnet import gluon
from mxnet import ndarray as nd
# After logging in www.kaggle.com, the training and testing data sets can be downloaded at:
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/train.csv
# https://www.kaggle.com/c/house-prices-advanced-regression-techniques/download/test.csv
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
all_X = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
test.loc[:, 'MSSubClass':'SaleCondition']))
# Get all the numerical features and apply standardization.
numeric_feas = all_X.dtypes[all_X.dtypes != "object"].index
all_X[numeric_feas] = all_X[numeric_feas].apply(lambda x:
(x - x.mean()) / (x.std()))
# Convert categorical feature values to numerical (including N/A).
all_X = pd.get_dummies(all_X, dummy_na=True)
# Approximate N/A feature value by the mean value of the current feature.
all_X = all_X.fillna(all_X.mean())
num_train = train.shape[0]
# Convert data formats to NDArrays to feed into gluon.
X_train = all_X[:num_train].as_matrix()
X_test = all_X[num_train:].as_matrix()
y_train = train.SalePrice.as_matrix()
X_train = nd.array(X_train)
y_train = nd.array(y_train)
y_train = y_train.reshape((num_train, 1))
X_test = nd.array(X_test)
square_loss = gluon.loss.L2Loss()
def get_rmse_log(net, X_train, y_train):
"""Gets root mse between the logarithms of the prediction and the truth."""
num_train = X_train.shape[0]
clipped_preds = nd.clip(net(X_train), 1, float('inf'))
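    # gluon's L2Loss is 0.5 * (pred - label)^2, hence the factor of 2 before averaging and taking the sqrt.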
return np.sqrt(2 * nd.sum(square_loss(
nd.log(clipped_preds), nd.log(y_train))).asscalar() / num_train)
def get_net():
"""Gets a neural network. Better results are obtained with modifications."""
net = gluon.nn.Sequential()
with net.name_scope():
net.add(gluon.nn.Dense(50, activation="relu"))
net.add(gluon.nn.Dense(1))
net.initialize()
return net
def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size):
"""Trains the model."""
dataset_train = gluon.data.ArrayDataset(X_train, y_train)
data_iter_train = gluon.data.DataLoader(dataset_train, batch_size,
shuffle=True)
trainer = gluon.Trainer(net.collect_params(), 'adam',
{'learning_rate': learning_rate,
'wd': weight_decay})
net.initialize(force_reinit=True)
for epoch in range(epochs):
for data, label in data_iter_train:
with autograd.record():
output = net(data)
loss = square_loss(output, label)
loss.backward()
trainer.step(batch_size)
avg_loss = get_rmse_log(net, X_train, y_train)
if epoch > verbose_epoch:
print("Epoch %d, train loss: %f" % (epoch, avg_loss))
return avg_loss
def k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size):
"""Conducts k-fold cross validation for the model."""
assert k > 1
fold_size = X_train.shape[0] // k
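    # Integer division: when the sample count is not divisible by k, the trailing remainder samples are never used.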
train_loss_sum = 0.0
test_loss_sum = 0.0
for test_idx in range(k):
X_val_test = X_train[test_idx * fold_size: (test_idx + 1) *
fold_size, :]
y_val_test = y_train[test_idx * fold_size: (test_idx + 1) * fold_size]
val_train_defined = False
for i in range(k):
if i != test_idx:
X_cur_fold = X_train[i * fold_size: (i + 1) * fold_size, :]
y_cur_fold = y_train[i * fold_size: (i + 1) * fold_size]
if not val_train_defined:
X_val_train = X_cur_fold
y_val_train = y_cur_fold
val_train_defined = True
else:
X_val_train = nd.concat(X_val_train, X_cur_fold, dim=0)
y_val_train = nd.concat(y_val_train, y_cur_fold, dim=0)
net = get_net()
train_loss = train(net, X_val_train, y_val_train, epochs, verbose_epoch,
learning_rate, weight_decay, batch_size)
train_loss_sum += train_loss
test_loss = get_rmse_log(net, X_val_test, y_val_test)
print("Test loss: %f" % test_loss)
test_loss_sum += test_loss
return train_loss_sum / k, test_loss_sum / k
# The sets of parameters. Better results are obtained with modifications.
# These parameters can be fine-tuned with k-fold cross-validation.
k = 5
epochs = 100
verbose_epoch = 95
learning_rate = 0.3
weight_decay = 100
batch_size = 100
train_loss, test_loss = \
k_fold_cross_valid(k, epochs, verbose_epoch, X_train, y_train,
learning_rate, weight_decay, batch_size)
print("%d-fold validation: Avg train loss: %f, Avg test loss: %f" %
(k, train_loss, test_loss))
def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size):
"""Trains the model and predicts on the test data set."""
net = get_net()
_ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
weight_decay, batch_size)
preds = net(X_test).asnumpy()
test['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
submission = pd.concat([test['Id'], test['SalePrice']], axis=1)
submission.to_csv('submission.csv', index=False)
learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
weight_decay, batch_size)
|
apache-2.0
|
Averroes/statsmodels
|
examples/python/robust_models_0.py
|
33
|
2992
|
## Robust Linear Models
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
# ## Estimation
#
# Load data:
data = sm.datasets.stackloss.load()
data.exog = sm.add_constant(data.exog)
# Huber's T norm with the (default) median absolute deviation scaling
huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT())
hub_results = huber_t.fit()
print(hub_results.params)
print(hub_results.bse)
print(hub_results.summary(yname='y',
xname=['var_%d' % i for i in range(len(hub_results.params))]))
# Huber's T norm with 'H2' covariance matrix
hub_results2 = huber_t.fit(cov="H2")
print(hub_results2.params)
print(hub_results2.bse)
# Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix
andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave())
andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3")
print('Parameters: ', andrew_results.params)
# See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for scale options
#
# ## Comparing OLS and RLM
#
# Artificial data with outliers:
nsample = 50
x1 = np.linspace(0, 20, nsample)
X = np.column_stack((x1, (x1-5)**2))
X = sm.add_constant(X)
sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger
beta = [5, 0.5, -0.0]
y_true2 = np.dot(X, beta)
y2 = y_true2 + sig*1. * np.random.normal(size=nsample)
y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample)
# ### Example 1: quadratic function with linear truth
#
# Note that the quadratic term in OLS regression will capture outlier effects.
res = sm.OLS(y2, X).fit()
print(res.params)
print(res.bse)
print(res.predict())
# Estimate RLM:
resrlm = sm.RLM(y2, X).fit()
print(resrlm.params)
print(resrlm.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(x1, y2, 'o',label="data")
ax.plot(x1, y_true2, 'b-', label="True")
prstd, iv_l, iv_u = wls_prediction_std(res)
ax.plot(x1, res.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
# ### Example 2: linear function with linear truth
#
# Fit a new OLS model using only the linear term and the constant:
X2 = X[:,[0,1]]
res2 = sm.OLS(y2, X2).fit()
print(res2.params)
print(res2.bse)
# Estimate RLM:
resrlm2 = sm.RLM(y2, X2).fit()
print(resrlm2.params)
print(resrlm2.bse)
# Draw a plot to compare OLS estimates to the robust estimates:
prstd, iv_l, iv_u = wls_prediction_std(res2)
fig, ax = plt.subplots()
ax.plot(x1, y2, 'o', label="data")
ax.plot(x1, y_true2, 'b-', label="True")
ax.plot(x1, res2.fittedvalues, 'r-', label="OLS")
ax.plot(x1, iv_u, 'r--')
ax.plot(x1, iv_l, 'r--')
ax.plot(x1, resrlm2.fittedvalues, 'g.-', label="RLM")
ax.legend(loc="best")
|
bsd-3-clause
|
shyamalschandra/scikit-learn
|
sklearn/metrics/tests/test_regression.py
|
272
|
6066
|
from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
|
bsd-3-clause
|
AOSPU/external_chromium_org
|
chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py
|
9
|
11274
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
# By default, use the version of Python is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python; look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
    raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
else:
p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if p_stdout.find('executable x86_64') >= 0:
bits = 64
else:
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
# We also need to make sure that there are at least 24 bits per pixel.
# https://code.google.com/p/chromium/issues/detail?id=316687
scons = [
'xvfb-run',
'--auto-servernum',
'--server-args', '-screen 0 1024x768x24',
python, 'scons.py',
]
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Propagate path to JSON output if present.
# Note that RunCommand calls sys.exit on errors, so potential errors
# from one command won't be overwritten by another one. Overwriting
# a successful results file with either success or failure is fine.
if options.json_build_results_output_file:
cmd.append('json_build_results_output_file=%s' %
options.json_build_results_output_file)
# Download the toolchain(s).
pkg_ver_dir = os.path.join(nacl_dir, 'build', 'package_version')
RunCommand([python, os.path.join(pkg_ver_dir, 'package_version.py'),
'--exclude', 'arm_trusted',
'--exclude', 'pnacl_newlib',
'--exclude', 'nacl_arm_newlib',
'sync', '--extract'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
parser.add_option('--json_build_results_output_file',
help='Path to a JSON file for machine-readable output.')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
|
bsd-3-clause
|
praisondani/dblp
|
pipeline/filtering.py
|
1
|
7354
|
import os
import pandas as pd
import luigi
import util
import aminer
import config
class PathBuilder(object):
def convert_path(self, fname, suffix):
base, ext = os.path.splitext(fname)
fname = '%s-%s' % (os.path.basename(base), suffix)
        fname = '%s.csv' % fname if not ext else '%s%s' % (fname, ext)
return os.path.join(config.filtered_dir, fname)
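# Example of PathBuilder.convert_path (hypothetical input path; the output
# directory comes from config.filtered_dir):
#   convert_path('/data/papers.csv', 'venue-and-year')
#   -> os.path.join(config.filtered_dir, 'papers-venue-and-year.csv')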
class RemovePapersNoVenueOrYear(luigi.Task, PathBuilder):
"""Remove papers with either no year or no venue listed."""
def requires(self):
return aminer.CSVPaperRecords()
def output(self):
papers_file = self.input()
fpath = self.convert_path(papers_file.path, 'venue-and-year')
return luigi.LocalTarget(fpath)
def run(self):
with self.input().open() as paper_fd:
df = pd.read_csv(paper_fd)
df = df[(~df['venue'].isnull()) & (~df['year'].isnull())]
with self.output().open('w') as outfile:
df.to_csv(outfile, index=False)
class RemoveUniqueVenues(luigi.Task, PathBuilder):
"""Remove papers with unique venues (occur only once in dataset)."""
def requires(self):
return RemovePapersNoVenueOrYear()
def output(self):
papers_file = self.input()
fpath = self.convert_path(papers_file.path, 'no-unique-venues')
return luigi.LocalTarget(fpath)
def run(self):
with self.input().open() as paper_file:
df = pd.read_csv(paper_file)
multiple = df.groupby('venue')['venue'].transform(len) > 1
filtered = df[multiple]
with self.output().open('w') as outfile:
filtered.to_csv(outfile, index=False)
class YearFiltering(object):
start = luigi.IntParameter()
end = luigi.IntParameter()
def get_fpath(self, fname, ext='csv'):
fpath = os.path.join(config.filtered_dir, fname)
return '%s-%d-%d.%s' % (fpath, self.start, self.end, ext)
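# Example of YearFiltering.get_fpath (hypothetical parameter values):
#   with start=2000 and end=2010, get_fpath('paper')
#   -> os.path.join(config.filtered_dir, 'paper') + '-2000-2010.csv'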
class FilterPapersToYearRange(luigi.Task, YearFiltering):
"""Filter paper records to those published in particular range of years."""
def requires(self):
return [RemoveUniqueVenues(), aminer.CSVRefsRecords()]
def output(self):
return [luigi.LocalTarget(self.get_fpath('paper')),
luigi.LocalTarget(self.get_fpath('refs'))]
def run(self):
papers_file, refs_file = self.input()
paper_out, refs_out = self.output()
with papers_file.open() as pfile:
papers_df = pd.read_csv(pfile)
# Filter based on range of years
papers_df['year'] = papers_df['year'].astype(int)
filtered = papers_df[(papers_df['year'] >= self.start) &
(papers_df['year'] <= self.end)]
# Save filtered paper records
with paper_out.open('w') as outfile:
filtered.to_csv(outfile, index=False)
paper_ids = filtered['id'].unique()
# Filter and save references based on paper ids.
with refs_file.open() as rfile:
refs_df = pd.read_csv(rfile)
filtered = refs_df[(refs_df['paper_id'].isin(paper_ids)) &
(refs_df['ref_id'].isin(paper_ids))]
with refs_out.open('w') as outfile:
filtered.to_csv(outfile, index=False)
class FilteredCSVPapers(luigi.Task, YearFiltering):
"""Abstraction to access only filtered papers output file."""
def requires(self):
if self.start is None or self.end is None:
return aminer.ParsePapersToCSV()
else:
return FilterPapersToYearRange(self.start, self.end)
def output(self):
return self.input()[0]
class FilteredCSVRefs(luigi.Task, YearFiltering):
"""Abstraction to access only filtered refs output file."""
def requires(self):
if self.start is None or self.end is None:
return aminer.ParsePapersToCSV()
else:
return FilterPapersToYearRange(self.start, self.end)
def output(self):
return self.input()[1]
class YearFilteringNonPaper(YearFiltering):
"""Filter data records which depend on paper ids for filtering."""
@property
def papers_file(self):
for file_obj in util.flatten(self.input()):
if 'paper' in file_obj.path:
return file_obj
def read_paper_ids(self):
with self.papers_file.open() as papers_file:
df = pd.read_csv(papers_file, header=0, usecols=(0,))
return df['id'].unique()
class FilterVenuesToYearRange(luigi.Task, YearFilteringNonPaper):
"""Filter venue records to a particular range of years."""
def requires(self):
return FilterPapersToYearRange(self.start, self.end)
def output(self):
return luigi.LocalTarget(self.get_fpath('venue'))
def run(self):
with self.papers_file.open() as pfile:
paper_df = pd.read_csv(pfile, header=0, usecols=(0,2))
unique_venues = paper_df['venue'].unique()
        with self.output().open('w') as afile:
afile.write('\n'.join(unique_venues))
class FilterAuthorshipsToYearRange(luigi.Task, YearFilteringNonPaper):
"""Filter authorship records to a particular range of years."""
def requires(self):
return (FilterPapersToYearRange(self.start, self.end),
aminer.ParseAuthorshipsToCSV())
@property
def author_file(self):
return self.input()[1]
def output(self):
return luigi.LocalTarget(self.get_fpath('author'))
def run(self):
paper_ids = self.read_paper_ids()
with self.author_file.open() as afile:
author_df = pd.read_csv(afile)
# Filter and write authorship records.
filtered = author_df[author_df['paper_id'].isin(paper_ids)]
with self.output().open('w') as outfile:
filtered.to_csv(outfile, index=False)
class FilterAuthorNamesToYearRange(luigi.Task, YearFiltering):
"""Filter author name,id records to particular range of years."""
def requires(self):
return (FilterAuthorshipsToYearRange(self.start, self.end),
aminer.ParseAuthorNamesToCSV())
@property
def author_file(self):
return self.input()[0]
@property
def person_file(self):
return self.input()[1]
def output(self):
return luigi.LocalTarget(self.get_fpath('person'))
def read_author_ids(self):
with self.author_file.open() as afile:
author_df = pd.read_csv(afile)
return author_df['author_id'].unique()
def run(self):
"""Filter and write person records."""
author_ids = self.read_author_ids()
with self.person_file.open() as person_file:
person_df = pd.read_csv(person_file)
filtered = person_df[person_df['id'].isin(author_ids)]
with self.output().open('w') as outfile:
filtered.to_csv(outfile, index=False)
class FilterAllCSVRecordsToYearRange(luigi.Task, YearFiltering):
def requires(self):
"""Trigger all tasks for year filtering."""
yield FilterPapersToYearRange(self.start, self.end)
yield FilterAuthorshipsToYearRange(self.start, self.end)
yield FilterAuthorNamesToYearRange(self.start, self.end)
yield FilterVenuesToYearRange(self.start, self.end)
if __name__ == "__main__":
luigi.run()
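# Example invocation (a sketch assuming luigi's standard command-line behaviour
# and an already-populated aminer dataset):
#   python filtering.py FilterAllCSVRecordsToYearRange --start 2000 --end 2010 --local-scheduler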
|
mit
|
pratapvardhan/scikit-image
|
doc/examples/features_detection/plot_gabor.py
|
21
|
4450
|
"""
=============================================
Gabor filter banks for texture classification
=============================================
In this example, we will see how to classify textures based on Gabor filter
banks. Frequency and orientation representations of the Gabor filter are
similar to those of the human visual system.
The images are filtered using the real parts of various Gabor filter
kernels. The mean and variance of the filtered images are then used as features
for classification, which is based on the least squared error for simplicity.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = ndi.convolve(image, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return feats
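# compute_feats returns an array of shape (len(kernels), 2): column 0 holds the
# mean and column 1 the variance of each Gabor-filtered image.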
def match(feats, ref_feats):
min_error = np.inf
min_i = None
for i in range(ref_feats.shape[0]):
error = np.sum((feats - ref_feats[i, :])**2)
if error < min_error:
min_error = error
min_i = i
return min_i
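# match() picks the reference row with the smallest sum of squared differences;
# with the (3, len(kernels), 2) ref_feats built below, the returned index maps
# directly onto image_names ('brick', 'grass', 'wall').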
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)
# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 30deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(ndi.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
ndi.convolve(image, np.imag(kernel), mode='wrap')**2)
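# power() is the magnitude of the complex Gabor response: the image is filtered
# with the real and imaginary kernel parts and the two results are combined as
# sqrt(real**2 + imag**2) per pixel.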
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(img)
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(np.real(kernel), interpolation='nearest')
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = np.min(powers)
vmax = np.max(powers)
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(patch, vmin=vmin, vmax=vmax)
ax.axis('off')
plt.show()
|
bsd-3-clause
|
walterreade/scikit-learn
|
examples/linear_model/plot_bayesian_ridge.py
|
50
|
2733
|
"""
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
lw = 2
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='lightgreen', linewidth=lw,
label="Bayesian Ridge estimate")
plt.plot(w, color='gold', linewidth=lw, label="Ground truth")
plt.plot(ols.coef_, color='navy', linestyle='--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='gold', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='navy', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="upper left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=lw)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
bsd-3-clause
|
danche354/Sequence-Labeling
|
preprocessing/ner-auto-encoder-2/test_auto_encoder.py
|
2
|
2502
|
from keras.models import load_model
import pandas as pd
import numpy as np
import sys
import os
# change dir to train file, for environment
os.chdir('../../ner/')
# add path
sys.path.append('../')
sys.path.append('../tools')
from tools import load_data
from tools import prepare
epoch = sys.argv[1]
test = sys.argv[2]
path = './model/word-hash-auto-encoder-128/model_epoch_%s.h5'%epoch
model = load_model(path)
train_data = load_data.load_ner(dataset='eng.train')
dev_data = load_data.load_ner(dataset='eng.testa')
test_data = load_data.load_ner(dataset='eng.testb')
train_word = []
dev_word = []
test_word = []
# all word
[train_word.extend(list(each[0])) for each in train_data]
[dev_word.extend(list(each[0])) for each in dev_data]
[test_word.extend(list(each[0])) for each in test_data]
train_word = [each.strip().lower() for each in train_word]
dev_word = [each.strip().lower() for each in dev_word]
test_word = [each.strip().lower() for each in test_word]
train_word_dict = {}
dev_word_dict = {}
test_word_dict = {}
for each in train_word:
if each in train_word_dict:
train_word_dict[each] += 1
else:
train_word_dict[each] = 1
for each in dev_word:
if each in dev_word_dict:
dev_word_dict[each] += 1
else:
dev_word_dict[each] = 1
for each in test_word:
if each in test_word_dict:
test_word_dict[each] += 1
else:
test_word_dict[each] = 1
train_word = list(train_word_dict.keys())
dev_word = list(dev_word_dict.keys())
test_word = list(test_word_dict.keys())
if test=='dev':
word = dev_word[:20]
elif test=='test':
word = test_word[:20]
else:
word = train_word[:20]
word_hashing = prepare.prepare_auto_encoder(batch=word, task='ner')
word_hashing = word_hashing.toarray()
output = model.predict_on_batch(word_hashing)
while True:
    number = int(input('please input word index: '))
exist = word[number]
print('word is: ' + exist)
if exist in train_word_dict:
print(' in train: ' + str(train_word_dict[exist]) + ' times.')
if exist in dev_word_dict:
print(' in dev: ' + str(dev_word_dict[exist]) + ' times.')
if exist in test_word_dict:
print(' in test: ' + str(test_word_dict[exist]) + ' times.')
print('-'*60)
ind = []
for i, e in enumerate(word_hashing[number]):
if e==1:
print(i)
ind.append(i)
print('word_hasing'+ '-'*60)
for i in ind:
print(output[number][i])
print('output'+ '-'*60)
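# Example invocation (a sketch; assumes a trained model such as
# model_epoch_5.h5 exists under ./model/word-hash-auto-encoder-128/):
#   python test_auto_encoder.py 5 dev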
|
mit
|
McIntyre-Lab/papers
|
fear_ase_2016/scripts/mclib_Python/wiggle.py
|
7
|
7451
|
import os
import logging
logger = logging.getLogger()
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.lines import Line2D
from matplotlib.collections import PatchCollection
from matplotlib import gridspec
class GeneModel(object):
""" Class to construct a gene model """
def __init__(self, geneObj, height=2):
""" Arguments:
geneObj (obj) = a gene object created from a subclass of _Gene in mclib.gff
height (int) = the height of the exon model
Attributes:
xLoc (tuple) = Gene start and end coordinates.
yLoc (list) = List of y-coordinates for plotting each transcript on a different row
patches (list) = List of gene models as matplotlib patches
"""
self.xLoc = (geneObj.start, geneObj.end)
# Each transcript needs to be plotted on a different row. Create a list of
# y-coordinates for plotting.
self.yLoc = self._get_y(1, 5, geneObj.transCnt)
self.patches = []
# Make list of transcript IDs and sort them depending on which strand the gene
# is on. Sort by start if on + strand and by end if on - strand.
tsList = geneObj.transcript.keys()
if geneObj.strand == '-':
tsList.sort(key=lambda x: geneObj.transcript[x]['tsEnd'])
else:
tsList.sort(key=lambda x: geneObj.transcript[x]['tsStart'])
# Draw the gene model as matplotlib patches. Iterate through the list
# of transcripts and draw UTR and CDS if they are available, otherwise
# draw exons.
for index, ts in enumerate(tsList):
try:
# Add 3' UTR
self._build_patch(geneObj.transcript[ts]['utr'][0], index, height=height, color='grey')
# Add CDS
self._build_patch(geneObj.transcript[ts]['cds'], index, height=height)
# Add 5' UTR
self._build_patch(geneObj.transcript[ts]['utr'][1], index, height=height, color='grey')
except:
# Just plot exons
self._build_patch(geneObj.transcript[ts]['exons'], index, height=height)
# Build the introns
self._build_patch(geneObj.transcript[ts]['introns'], index, height=height, intron=True)
def _build_patch(self, annoList, index, height=2, color='black', intron=False):
""" Create a rectangle patch to represent a exon
Arguments:
annoList (list) = list of start end coordinates for gene annotations
index (int) = number of which transcript we are on, so I can plot different transcripts on different lines
            height (int) = height of the exons in the gene model
color (str) = color for exon blocks
intron (bool) = indicate if annoList are introns
"""
for coord in annoList:
            start = coord[0] - 1  # convert coordinate from 1-based annotation to 0-based wiggle
end = coord[1]
width = end - start
if intron:
# Put intron in the middle of the exon
yloc = self.yLoc[index] + height / 2 - 0.025
self.patches.append(Rectangle((start, yloc), width, 0.05, fill=True, color=color))
else:
self.patches.append(Rectangle((start, self.yLoc[index]), width, height, fill=True, color=color))
def _get_y(self, start, step, count):
""" Return a list of numbers equally spaced by a step size
Arguments:
start (int) = starting location, typically use 1
step (int) = step size to space the number
count (int) = number of equally spaced number to produce
"""
yList = []
for i in range(0,count):
yList.append(start)
start += step
return yList
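# For example, GeneModel._get_y(1, 5, 3) returns [1, 6, 11]: three equally
# spaced y-coordinates, one row per transcript.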
def plot_wiggle(pileDict, outName, chrom, start, end, geneModel=None, fusionModel=None, variantPos=None, title=None):
""" Function to construct a wiggle plot with gene models
Arguments:
pileDict (dict) = where keys are genome coordinates and values are counts at that position
outName (str) = name of the output png
geneModel (obj) = a mclib/wiggle.py GeneModel object
        fusionModel (obj) = a GeneModel-like object holding fusion annotations (optional)
variantPos (List) = a list of positions that have a variant of interest
title (str) = a title for the plot
"""
# Set up the figure
fig = plt.figure(figsize=(20, 10))
# Create multiple rows for each subplot
if geneModel and variantPos and fusionModel:
gs = gridspec.GridSpec(4, 1, height_ratios=[1, .08, 1, .5])
ax1 = plt.subplot(gs[0])
vax = plt.subplot(gs[1], sharex=ax1)
gax = plt.subplot(gs[2], sharex=ax1)
fax = plt.subplot(gs[3], sharex=ax1)
elif geneModel and variantPos:
gs = gridspec.GridSpec(3, 1, height_ratios=[1, .08, 1])
ax1 = plt.subplot(gs[0])
vax = plt.subplot(gs[1], sharex=ax1)
gax = plt.subplot(gs[2], sharex=ax1)
elif geneModel and fusionModel:
gs = gridspec.GridSpec(3, 1, height_ratios=[1, 1, .5])
ax1 = plt.subplot(gs[0])
gax = plt.subplot(gs[1], sharex=ax1)
fax = plt.subplot(gs[2], sharex=ax1)
elif variantPos and fusionModel:
gs = gridspec.GridSpec(3, 1, height_ratios=[1, .08,.5])
ax1 = plt.subplot(gs[0])
vax = plt.subplot(gs[1], sharex=ax1)
fax = plt.subplot(gs[2], sharex=ax1)
elif geneModel:
gs = gridspec.GridSpec(2, 1, height_ratios=[1, 1])
ax1 = plt.subplot(gs[0])
gax = plt.subplot(gs[1], sharex=ax1)
elif variantPos:
gs = gridspec.GridSpec(2, 1, height_ratios=[1, .08])
ax1 = plt.subplot(gs[0])
vax = plt.subplot(gs[1], sharex=ax1)
elif fusionModel:
gs = gridspec.GridSpec(2, 1, height_ratios=[1,.5])
ax1 = plt.subplot(gs[0])
fax = plt.subplot(gs[1], sharex=ax1)
else:
ax1 = plt.subplot(111)
# Make wiggle plot
## ax1 is the wiggle
ax1.set_ylim(0, max(pileDict.values())+150)
ax1.set_xlim(start-300, end+300)
    n, bins, patches = ax1.hist(list(pileDict.keys()), bins=len(pileDict), weights=list(pileDict.values()))
if variantPos:
# Plot variants
logger.debug('Creating variant plot')
## vax will be the variant subplot
vax.scatter(variantPos, [0.5]*len(variantPos), marker="^")
        vax.axis('off')  # Hide axes on the variant subplot
if geneModel:
# Plot gene models
logger.debug('Creating geneModel plot')
## gax will be the gene model subplot
gax.set_ylim(max(geneModel.yLoc)+3, min(geneModel.yLoc)-3)
gax.axis('off') # Hide y-axis on gene model plot
# Put the gene models together and create their plots
p = PatchCollection(geneModel.patches, match_original=True)
gax.add_collection(p)
if fusionModel:
# Plot gene models
logger.debug('Creating fusion plot')
## fax will be the fusion model subplot
        fax.set_ylim(min(fusionModel.yLoc), max(fusionModel.yLoc)+5)
        fax.axis('off')  # Hide axes on the fusion model subplot
# Put the fusion models together and create their plots
p = PatchCollection(fusionModel.patches, match_original=True)
fax.add_collection(p)
# Save output
fig.suptitle(title, fontsize=20, fontweight='bold')
plt.show()
fig.savefig(outName)
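# Minimal usage sketch (hypothetical values; geneModel/fusionModel would be
# GeneModel objects built from mclib.gff gene objects):
#   pile = {1000: 12, 1001: 15, 1002: 9}
#   plot_wiggle(pile, 'region.png', '2L', 1000, 1002, title='Read coverage')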
|
lgpl-3.0
|
ricardog/raster-project
|
setup.py
|
1
|
1134
|
from setuptools import setup, find_packages
setup(
name='projections',
version='0.1',
packages=find_packages(),
include_package_data=True,
install_requires=[
'Click',
'gdal',
'fiona',
'geopy',
'joblib',
'matplotlib',
'netCDF4',
'numba',
'numpy',
'pandas',
'pylru',
'pyparsing',
'rasterio==0.36.0',
'rpy2',
'setuptools',
'shapely',
'xlrd',
],
entry_points='''
[console_scripts]
extract_values=projections.scripts.extract_values:main
gen_hyde=projections.scripts.gen_hyde:main
gen_sps=projections.scripts.gen_sps:main
hyde2nc=projections.scripts.hyde2nc:main
nc_dump=projections.scripts.nc_dump:main
nctomp4=projections.scripts.nctomp4:main
project=projections.scripts.project:cli
r2py=projections.scripts.r2py:main
rview=projections.scripts.rview:main
tifftomp4=projections.scripts.tifftomp4:main
tiffcmp=projections.scripts.tiffcmp:main
''',
build_ext='''
include_dirs=/usr/local/include
'''
)
|
apache-2.0
|
vhaasteren/scipy
|
scipy/signal/windows.py
|
4
|
51595
|
"""The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'get_window']
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Included for completeness, this is equivalent to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return np.ones(M, float)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) +
a[4] * np.cos(4 * fac))
if not sym and not odd:
w = w[:-1]
return w
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The window was named for Julius van Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
hanning = hann
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
The time domain window is then generated using the IFFT, so
power-of-two `M` are the fastest to generate, and prime number `M` are
the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# our width is the full bandwidth
width = width / 2
# to match the old version
width = width / 2
m = np.arange(M, dtype='d')
H = np.zeros((2, M))
H[0, 1:] = m[1:] * (M - m[1:]) / 2
H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
_, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
win = win.ravel() / win.max()
if not sym and not odd:
win = win[:-1]
return win
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
if not sym and not odd:
w = w[:-1]
return w
def exponential(M, center=None, tau=1., sym=True):
r"""Return an exponential (or Poisson) window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
center : float, optional
Parameter defining the center location of the window function.
The default value if not given is ``center = (M-1) / 2``. This
parameter must take its default value for symmetric windows.
tau : float, optional
Parameter defining the decay. For ``center = 0`` use ``tau = -(M-1) / ln(x)``
if ``x`` is the fraction of the window remaining at the end.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Exponential window is defined as
.. math:: w(n) = e^{-|n-center| / \tau}
References
----------
S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
Technical Review 3, Bruel & Kjaer, 1987.
Examples
--------
Plot the symmetric window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> M = 51
>>> tau = 3.0
>>> window = signal.exponential(M, tau=tau)
>>> plt.plot(window)
>>> plt.title("Exponential Window (tau=3.0)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -35, 0])
>>> plt.title("Frequency response of the Exponential window (tau=3.0)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
This function can also generate non-symmetric windows:
>>> tau2 = -(M-1) / np.log(0.01)
>>> window2 = signal.exponential(M, 0, tau2, False)
>>> plt.figure()
>>> plt.plot(window2)
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
if center is None:
center = (M-1) / 2
n = np.arange(0, M)
w = np.exp(-np.abs(n-center) / tau)
if not sym and not odd:
w = w[:-1]
return w
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True, create a "periodic" window, ready to use with `ifftshift`
and to be multiplied by the result of an FFT (see also `fftfreq`).
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hann, bartlett, exponential,
flattop, parzen, bohman, blackmanharris, nuttall, barthann,
kaiser (needs beta), gaussian (needs std),
general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, string_types):
if window in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'slepian', 'optimal', 'slep', 'dss',
'chebwin', 'cheb']:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
if winstr in ['blackman', 'black', 'blk']:
winfunc = blackman
elif winstr in ['triangle', 'triang', 'tri']:
winfunc = triang
elif winstr in ['hamming', 'hamm', 'ham']:
winfunc = hamming
elif winstr in ['bartlett', 'bart', 'brt']:
winfunc = bartlett
elif winstr in ['hanning', 'hann', 'han']:
winfunc = hann
elif winstr in ['blackmanharris', 'blackharr', 'bkh']:
winfunc = blackmanharris
elif winstr in ['parzen', 'parz', 'par']:
winfunc = parzen
elif winstr in ['bohman', 'bman', 'bmn']:
winfunc = bohman
elif winstr in ['nuttall', 'nutl', 'nut']:
winfunc = nuttall
elif winstr in ['barthann', 'brthan', 'bth']:
winfunc = barthann
elif winstr in ['flattop', 'flat', 'flt']:
winfunc = flattop
elif winstr in ['kaiser', 'ksr']:
winfunc = kaiser
elif winstr in ['gaussian', 'gauss', 'gss']:
winfunc = gaussian
elif winstr in ['general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs']:
winfunc = general_gaussian
elif winstr in ['boxcar', 'box', 'ones', 'rect', 'rectangular']:
winfunc = boxcar
elif winstr in ['slepian', 'slep', 'optimal', 'dpss', 'dss']:
winfunc = slepian
elif winstr in ['cosine', 'halfcosine']:
winfunc = cosine
elif winstr in ['chebwin', 'cheb']:
winfunc = chebwin
elif winstr in ['exponential', 'poisson']:
winfunc = exponential
else:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
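# A minimal usage sketch (an assumption-laden addition, not part of the
# original module): because ``sym = not fftbins`` above, ``get_window(name, Nx)``
# with the default ``fftbins=True`` should equal the corresponding window
# function (here the module's ``hann``, defined earlier in this file) called
# directly with ``sym=False``.
if __name__ == '__main__':
    _periodic = get_window('hann', 8)   # dispatches to hann(8, sym=False)
    _direct = hann(8, sym=False)
    assert np.allclose(_periodic, _direct)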
|
bsd-3-clause
|
ephes/scikit-learn
|
examples/plot_multioutput_face_completion.py
|
330
|
3019
|
"""
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
|
bsd-3-clause
|
scw/geopandas
|
geopandas/tools/sjoin.py
|
6
|
4457
|
import numpy as np
import pandas as pd
import rtree
from shapely import prepared
def sjoin(left_df, right_df, how='inner', op='intersects',
lsuffix='left', rsuffix='right', **kwargs):
"""Spatial join of two GeoDataFrames.
left_df, right_df are GeoDataFrames
how: type of join
left -> use keys from left_df; retain only left_df geometry column
right -> use keys from right_df; retain only right_df geometry column
inner -> use intersection of keys from both dfs;
retain only left_df geometry column
op: binary predicate {'intersects', 'contains', 'within'}
see http://toblerity.org/shapely/manual.html#binary-predicates
lsuffix: suffix to apply to overlapping column names (left GeoDataFrame)
rsuffix: suffix to apply to overlapping column names (right GeoDataFrame)
"""
allowed_hows = ['left', 'right', 'inner']
if how not in allowed_hows:
raise ValueError("`how` was \"%s\" but is expected to be in %s" % \
(how, allowed_hows))
allowed_ops = ['contains', 'within', 'intersects']
if op not in allowed_ops:
raise ValueError("`op` was \"%s\" but is expected to be in %s" % \
(op, allowed_ops))
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
if left_df.crs != right_df.crs:
print('Warning: CRS does not match!')
tree_idx = rtree.index.Index()
right_df_bounds = right_df['geometry'].apply(lambda x: x.bounds)
for i in right_df_bounds.index:
tree_idx.insert(i, right_df_bounds[i])
idxmatch = (left_df['geometry'].apply(lambda x: x.bounds)
.apply(lambda x: list(tree_idx.intersection(x))))
idxmatch = idxmatch[idxmatch.apply(len) > 0]
r_idx = np.concatenate(idxmatch.values)
l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
# Vectorize predicate operations
def find_intersects(a1, a2):
return a1.intersects(a2)
def find_contains(a1, a2):
return a1.contains(a2)
predicate_d = {'intersects': find_intersects,
'contains': find_contains,
'within': find_contains}
check_predicates = np.vectorize(predicate_d[op])
result = (
pd.DataFrame(
np.column_stack(
[l_idx,
r_idx,
check_predicates(
left_df['geometry']
.apply(lambda x: prepared.prep(x))[l_idx],
right_df['geometry'][r_idx])
]))
)
result.columns = ['index_%s' % lsuffix, 'index_%s' % rsuffix, 'match_bool']
result = (
pd.DataFrame(result[result['match_bool']==1])
.drop('match_bool', axis=1)
)
if op == "within":
# within implemented as the inverse of contains; swap names
left_df, right_df = right_df, left_df
result = result.rename(columns={
'index_%s' % (lsuffix): 'index_%s' % (rsuffix),
'index_%s' % (rsuffix): 'index_%s' % (lsuffix)})
if how == 'inner':
result = result.set_index('index_%s' % lsuffix)
return (
left_df
.merge(result, left_index=True, right_index=True)
.merge(right_df.drop('geometry', axis=1),
left_on='index_%s' % rsuffix, right_index=True,
suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
)
elif how == 'left':
result = result.set_index('index_%s' % lsuffix)
return (
left_df
.merge(result, left_index=True, right_index=True, how='left')
.merge(right_df.drop('geometry', axis=1),
how='left', left_on='index_%s' % rsuffix, right_index=True,
suffixes=('_%s' % lsuffix, '_%s' % rsuffix))
)
elif how == 'right':
return (
left_df
.drop('geometry', axis=1)
.merge(result.merge(right_df,
left_on='index_%s' % rsuffix, right_index=True,
how='right'), left_index=True,
right_on='index_%s' % lsuffix, how='right')
.set_index('index_%s' % rsuffix)
)
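# A minimal usage sketch (not part of the original module; assumes geopandas
# and shapely are installed, and the column names 'name' and 'value' are
# illustrative only): join points onto the polygons they intersect.
if __name__ == '__main__':
    import geopandas as gpd
    from shapely.geometry import Point, Polygon

    points = gpd.GeoDataFrame(
        {'name': ['a', 'b'],
         'geometry': [Point(0.5, 0.5), Point(3.0, 3.0)]})
    squares = gpd.GeoDataFrame(
        {'value': [10],
         'geometry': [Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])]})

    # Inner join keeps only the point that falls inside the square and
    # appends the matching square's attributes (suffixed on collisions).
    print(sjoin(points, squares, how='inner', op='intersects'))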
|
bsd-3-clause
|
khkaminska/scikit-learn
|
sklearn/preprocessing/tests/test_imputation.py
|
213
|
11911
|
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent value as promised in the doc, but the lowest most frequent one.
# If this test fails after a scipy update, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
|
bsd-3-clause
|
UNR-AERIAL/scikit-learn
|
examples/applications/wikipedia_principal_eigenvector.py
|
233
|
7819
|
"""
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the Wikipedia-related strongest components of the
# principal singular vector, which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
|
bsd-3-clause
|
louispotok/pandas
|
pandas/tests/scalar/interval/test_interval.py
|
3
|
6583
|
from __future__ import division
import numpy as np
from pandas import Interval, Timestamp, Timedelta
import pandas.core.common as com
import pytest
import pandas.util.testing as tm
@pytest.fixture
def interval():
return Interval(0, 1)
class TestInterval(object):
def test_properties(self, interval):
assert interval.closed == 'right'
assert interval.left == 0
assert interval.right == 1
assert interval.mid == 0.5
def test_repr(self, interval):
assert repr(interval) == "Interval(0, 1, closed='right')"
assert str(interval) == "(0, 1]"
interval_left = Interval(0, 1, closed='left')
assert repr(interval_left) == "Interval(0, 1, closed='left')"
assert str(interval_left) == "[0, 1)"
def test_contains(self, interval):
assert 0.5 in interval
assert 1 in interval
assert 0 not in interval
msg = "__contains__ not defined for two intervals"
with tm.assert_raises_regex(TypeError, msg):
interval in interval
interval_both = Interval(0, 1, closed='both')
assert 0 in interval_both
assert 1 in interval_both
interval_neither = Interval(0, 1, closed='neither')
assert 0 not in interval_neither
assert 0.5 in interval_neither
assert 1 not in interval_neither
def test_equal(self):
assert Interval(0, 1) == Interval(0, 1, closed='right')
assert Interval(0, 1) != Interval(0, 1, closed='left')
assert Interval(0, 1) != 0
def test_comparison(self):
with tm.assert_raises_regex(TypeError, 'unorderable types'):
Interval(0, 1) < 2
assert Interval(0, 1) < Interval(1, 2)
assert Interval(0, 1) < Interval(0, 2)
assert Interval(0, 1) < Interval(0.5, 1.5)
assert Interval(0, 1) <= Interval(0, 1)
assert Interval(0, 1) > Interval(-1, 2)
assert Interval(0, 1) >= Interval(0, 1)
def test_hash(self, interval):
# should not raise
hash(interval)
@pytest.mark.parametrize('left, right, expected', [
(0, 5, 5),
(-2, 5.5, 7.5),
(10, 10, 0),
(10, np.inf, np.inf),
(-np.inf, -5, np.inf),
(-np.inf, np.inf, np.inf),
(Timedelta('0 days'), Timedelta('5 days'), Timedelta('5 days')),
(Timedelta('10 days'), Timedelta('10 days'), Timedelta('0 days')),
(Timedelta('1H10M'), Timedelta('5H5M'), Timedelta('3H55M')),
(Timedelta('5S'), Timedelta('1H'), Timedelta('59M55S'))])
def test_length(self, left, right, expected):
# GH 18789
iv = Interval(left, right)
result = iv.length
assert result == expected
@pytest.mark.parametrize('left, right, expected', [
('2017-01-01', '2017-01-06', '5 days'),
('2017-01-01', '2017-01-01 12:00:00', '12 hours'),
('2017-01-01 12:00', '2017-01-01 12:00:00', '0 days'),
('2017-01-01 12:01', '2017-01-05 17:31:00', '4 days 5 hours 30 min')])
@pytest.mark.parametrize('tz', (None, 'UTC', 'CET', 'US/Eastern'))
def test_length_timestamp(self, tz, left, right, expected):
# GH 18789
iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz))
result = iv.length
expected = Timedelta(expected)
assert result == expected
@pytest.mark.parametrize('left, right', [
('a', 'z'),
(('a', 'b'), ('c', 'd')),
(list('AB'), list('ab')),
(Interval(0, 1), Interval(1, 2))])
def test_length_errors(self, left, right):
# GH 18789
iv = Interval(left, right)
msg = 'cannot compute length between .* and .*'
with tm.assert_raises_regex(TypeError, msg):
iv.length
def test_math_add(self, interval):
expected = Interval(1, 2)
actual = interval + 1
assert expected == actual
expected = Interval(1, 2)
actual = 1 + interval
assert expected == actual
actual = interval
actual += 1
assert expected == actual
msg = r"unsupported operand type\(s\) for \+"
with tm.assert_raises_regex(TypeError, msg):
interval + Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval + 'foo'
def test_math_sub(self, interval):
expected = Interval(-1, 0)
actual = interval - 1
assert expected == actual
actual = interval
actual -= 1
assert expected == actual
msg = r"unsupported operand type\(s\) for -"
with tm.assert_raises_regex(TypeError, msg):
interval - Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval - 'foo'
def test_math_mult(self, interval):
expected = Interval(0, 2)
actual = interval * 2
assert expected == actual
expected = Interval(0, 2)
actual = 2 * interval
assert expected == actual
actual = interval
actual *= 2
assert expected == actual
msg = r"unsupported operand type\(s\) for \*"
with tm.assert_raises_regex(TypeError, msg):
interval * Interval(1, 2)
msg = r"can\'t multiply sequence by non-int"
with tm.assert_raises_regex(TypeError, msg):
interval * 'foo'
def test_math_div(self, interval):
expected = Interval(0, 0.5)
actual = interval / 2.0
assert expected == actual
actual = interval
actual /= 2.0
assert expected == actual
msg = r"unsupported operand type\(s\) for /"
with tm.assert_raises_regex(TypeError, msg):
interval / Interval(1, 2)
with tm.assert_raises_regex(TypeError, msg):
interval / 'foo'
def test_constructor_errors(self):
msg = "invalid option for 'closed': foo"
with tm.assert_raises_regex(ValueError, msg):
Interval(0, 1, closed='foo')
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
Interval(1, 0)
@pytest.mark.parametrize('tz_left, tz_right', [
(None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')])
def test_constructor_errors_tz(self, tz_left, tz_right):
# GH 18538
left = Timestamp('2017-01-01', tz=tz_left)
right = Timestamp('2017-01-02', tz=tz_right)
error = TypeError if com._any_none(tz_left, tz_right) else ValueError
with pytest.raises(error):
Interval(left, right)
|
bsd-3-clause
|
aswolf/xmeos
|
xmeos/test/old_test_RTperturb.py
|
1
|
17911
|
import numpy as np
from models import compress
from models import thermal
from models import composite
from models import core
import models  # classes such as models.Vinet are referenced via this namespace below
import pytest
import matplotlib.pyplot as plt
import matplotlib as mpl
from abc import ABCMeta, abstractmethod
import copy
#====================================================================
# Define "slow" tests
# - indicated by @slow decorator
# - slow tests are run only if using --runslow cmd line arg
#====================================================================
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run"
)
#====================================================================
class BaseTestThermalPathMod(object):
@abstractmethod
def load_thermal_path_mod(self, eos_d):
assert False, 'must implement load_thermal_path_mod()'
@abstractmethod
def init_params(self,eos_d):
assert False, 'must implement init_params()'
return eos_d
def test_heat_capacity(self):
Nsamp = 10001
eos_d = self.init_params({})
param_d = eos_d['param_d']
Tmod_a = np.linspace(.7,1.3,Nsamp)*param_d['T0']
dT = Tmod_a[1] - Tmod_a[0]
# print eos_d['modtype_d']
thermal_path_mod = eos_d['modtype_d']['ThermalPathMod']
heat_capacity_a = thermal_path_mod.heat_capacity(Tmod_a,eos_d)
energy_a = thermal_path_mod.energy(Tmod_a,eos_d)
heat_capacity_num_a = np.gradient(energy_a,dT)
E_range = np.max(energy_a)-np.min(energy_a)
T_range = Tmod_a[-1]-Tmod_a[0]
Cv_scl = E_range/T_range
# Cv_range = np.max(heat_capacity_a)-np.min(heat_capacity_a)
Cv_diff_a = heat_capacity_num_a-heat_capacity_a
# Cverr = np.max(np.abs(Cv_diff_a/Cv_range))
Cverr = np.max(np.abs(Cv_diff_a/Cv_scl))
CVTOL = 1.0/Nsamp
# print self
# print PTOL*Prange
# def plot_press_mismatch(Tmod_a,press_a,press_num_a):
# plt.figure()
# plt.ion()
# plt.clf()
# plt.plot(Tmod_a,press_num_a,'bx',Tmod_a,press_a,'r-')
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# plot_press_mismatch(Tmod_a,press_a,press_num_a)
assert np.abs(Cverr) < CVTOL, '(Cv error)/Cv_scl, ' + str(Cverr) + \
', must be less than CVTOL, ' + str(CVTOL)
#====================================================================
class BaseTestThermalMod(object):
@abstractmethod
def load_thermal_mod(self, eos_d):
assert False, 'must implement load_thermal_mod()'
@abstractmethod
def init_params(self,eos_d):
assert False, 'must implement init_params()'
return eos_d
def test_heat_capacity_isochore(self):
Nsamp = 10001
eos_d = self.init_params({})
param_d = eos_d['param_d']
Viso = 0.7*param_d['V0']
Tmod_a = np.linspace(.7,1.3,Nsamp)*param_d['T0']
dT = Tmod_a[1] - Tmod_a[0]
# print eos_d['modtype_d']
thermal_mod = eos_d['modtype_d']['ThermalMod']
heat_capacity_a = thermal_mod.heat_capacity(Viso,Tmod_a,eos_d)
energy_a = np.squeeze( thermal_mod.energy(Viso,Tmod_a,eos_d) )
heat_capacity_num_a = np.gradient(energy_a,dT)
E_range = np.max(energy_a)-np.min(energy_a)
T_range = Tmod_a[-1]-Tmod_a[0]
Cv_scl = E_range/T_range
# Cv_range = np.max(heat_capacity_a)-np.min(heat_capacity_a)
Cv_diff_a = heat_capacity_num_a-heat_capacity_a
# Cverr = np.max(np.abs(Cv_diff_a/Cv_range))
Cverr = np.max(np.abs(Cv_diff_a/Cv_scl))
CVTOL = 1.0/Nsamp
# print self
# print PTOL*Prange
# def plot_press_mismatch(Tmod_a,press_a,press_num_a):
# plt.figure()
# plt.ion()
# plt.clf()
# plt.plot(Tmod_a,press_num_a,'bx',Tmod_a,press_a,'r-')
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# plot_press_mismatch(Tmod_a,press_a,press_num_a)
assert np.abs(Cverr) < CVTOL, '(Cv error)/Cv_scl, ' + str(Cverr) + \
', must be less than CVTOL, ' + str(CVTOL)
#====================================================================
#====================================================================
class TestRosenfeldTaranzonaPerturb(BaseTestThermalMod):
def load_thermal_mod(self, eos_d):
thermal_mod = models.RosenfeldTaranzonaPerturb()
core.set_modtypes( ['ThermalMod'], [thermal_mod], eos_d )
pass
def load_gamma_mod(self, eos_d):
gamma_mod = models.GammaPowLaw()
core.set_modtypes( ['GammaMod'], [gamma_mod], eos_d )
pass
def load_compress_path_mod(self, eos_d):
S0, = core.get_params(['S0'],eos_d)
compress_path_mod = models.Vinet(path_const='S',level_const=S0,
supress_energy=False,
supress_press=False)
core.set_modtypes( ['CompressPathMod'], [compress_path_mod], eos_d )
pass
def load_eos_mod(self, eos_d):
self.load_compress_path_mod(eos_d)
self.load_gamma_mod(eos_d)
self.load_thermal_mod(eos_d)
full_mod = models.ThermalPressMod()
core.set_modtypes( ['FullMod'], [full_mod], eos_d )
pass
def init_params(self,eos_d):
core.set_consts( [], [], eos_d )
# EOS Parameter values initially set by Mosenfelder2009
# Set model parameter values
mass_avg = (24.31+28.09+3*16.0)/5.0 # g/(mol atom)
T0 = 1673.0
S0 = 0.0 # must adjust
param_key_a = ['T0','S0','mass_avg']
param_val_a = np.array([T0,S0,mass_avg])
core.set_params( param_key_a, param_val_a, eos_d )
V0 = (38.575*1e-5)*mass_avg/eos_d['const_d']['Nmol']/1e3*1e30 # ang^3/atom
K0 = 20.8
KP0= 10.2
# KP20 = -2.86 # Not actually used!
E0 = 0.0
param_key_a = ['V0','K0','KP0','E0']
param_val_a = np.array([V0,K0,KP0,E0])
core.set_params( param_key_a, param_val_a, eos_d )
VR = V0
gammaR = 0.46
qR = -1.35
param_key_a = ['VR','gammaR','qR']
param_val_a = np.array([VR,gammaR,qR])
core.set_params( param_key_a, param_val_a, eos_d )
dE0th = +1.0
dV0th = -0.02
dK0th = +0.1
dKP0th = -0.00
# dE0th = +0.4
# dV0th = -0.0
# dK0th = -0.01
# dKP0th = -0.03
lognfac = 0.0
mexp = 3.0/5
param_key_a = ['dE0th','dV0th','dK0th','dKP0th','lognfac','mexp']
param_val_a = np.array([dE0th,dV0th,dK0th,dKP0th,lognfac,mexp])
core.set_params( param_key_a, param_val_a, eos_d )
# Must convert energy units from kJ/g to eV/atom
energy_conv_fac = mass_avg/eos_d['const_d']['kJ_molpereV']
core.set_consts( ['energy_conv_fac'], [energy_conv_fac],
eos_d )
self.load_eos_mod( eos_d )
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
return eos_d
def test_energy_curves_Spera2011(self):
Nsamp = 101
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vgrid_a = np.linspace(0.4,1.1,Nsamp)*param_d['V0']
Tgrid_a = np.array([2500,3000,3500,4000,4500,5000])
full_mod = eos_d['modtype_d']['FullMod']
# energy_conv_fac, = core.get_consts(['energy_conv_fac'],eos_d)
energy_mod_a = []
press_mod_a = []
for iT in Tgrid_a:
ienergy_a = full_mod.energy(Vgrid_a,iT,eos_d)
ipress_a = full_mod.press(Vgrid_a,iT,eos_d)
energy_mod_a.append(ienergy_a)
press_mod_a.append(ipress_a)
# energy_mod_a = np.array( energy_mod_a )
energy_mod_a = np.array( energy_mod_a )
press_mod_a = np.array( press_mod_a )
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
cmap=plt.get_cmap('coolwarm')
col_a = cmap(1.0*(Tgrid_a-Tgrid_a[0])/np.ptp(Tgrid_a))[:,:3]
plt.ion()
plt.figure()
[plt.plot(ipress_a, ienergy_a,'-',color=icol_a,label=iT) \
for ipress_a,ienergy_a,icol_a,iT in zip(press_mod_a,energy_mod_a,col_a,Tgrid_a)]
ax = plt.axes()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1],labels[::-1],loc='upper left')
plt.xlim(-5,165)
ybnd = [np.min(energy_mod_a[press_mod_a<165]), np.max(energy_mod_a[press_mod_a<165])]
plt.ylim(ybnd[0],ybnd[1])
# plt.ylim(-100.5,-92)
print('Compare this plot with Spera2011 Fig 2b (Oganov potential):')
print('Do the figures agree (y/n or k for keyboard)?')
s = input('--> ')
if s=='k':
from IPython import embed; embed(); import ipdb; ipdb.set_trace()
assert s=='y', 'Figure must match published figure'
pass
def test_kinetic_contribution(self):
Nsamp = 1001
eos_d = self.init_params({})
eos_d['param_d']['E0'] = -21.3
eos_d['param_d']['dE0th'] = 0.5
V0 = eos_d['param_d']['V0']
Vgrid_a = V0*np.arange(0.4,1.11,0.1)
Tgrid_a = np.linspace( 2500, 5000, Nsamp)
dT = Tgrid_a[1]-Tgrid_a[0]
kboltz = eos_d['const_d']['kboltz']
# Test entropy
TOL = 1e-4
iV = Vgrid_a[0]
genRT_mod = models.GenRosenfeldTaranzona()
thermal_mod = eos_d['modtype_d']['ThermalMod']
full_mod = eos_d['modtype_d']['FullMod']
Cvkin_a = genRT_mod.calc_heat_capacity_kin( Tgrid_a ,eos_d )
Ekin_a = genRT_mod.calc_energy_kin( Tgrid_a ,eos_d )
Cvkin_dE_err_a = ( Cvkin_a - np.gradient( Ekin_a, dT ) )/kboltz
assert np.all( np.abs(Cvkin_dE_err_a[1:-1]) < TOL ), \
'Cvkin must match numerical energy deriv'
Skin_a = genRT_mod.calc_entropy_kin( Tgrid_a ,eos_d, Tref=eos_d['param_d']['T0'] )
Cvkin_dS_err_a = ( Cvkin_a - Tgrid_a*np.gradient( Skin_a, dT ) )/kboltz
assert np.all( np.abs(Cvkin_dS_err_a[1:-1]) < TOL ), \
'Cvkin must match numerical entropy deriv'
Fkin_a = Ekin_a-Tgrid_a*Skin_a
Skin_dF_err_a = ( Skin_a + np.gradient( Fkin_a, dT ) )/kboltz
assert np.all( np.abs(Skin_dF_err_a[1:-1]) < TOL ), \
'Skin must match numerical free energy deriv'
def test_potential_contribution(self):
Nsamp = 1001
eos_d = self.init_params({})
eos_d['param_d']['E0'] = -21.3
eos_d['param_d']['dE0th'] = 0.5
V0 = eos_d['param_d']['V0']
Vgrid_a = V0*np.arange(0.4,1.11,0.1)
Tgrid_a = np.linspace( 2500, 5000, Nsamp)
dT = Tgrid_a[1]-Tgrid_a[0]
kboltz = eos_d['const_d']['kboltz']
# Test entropy
TOL = 1e-4
iV = Vgrid_a[0]
genRT_mod = models.GenRosenfeldTaranzona()
thermal_mod = eos_d['modtype_d']['ThermalMod']
full_mod = eos_d['modtype_d']['FullMod']
# verify potential heat capacity (energy deriv)
acoef_a, bcoef_a = thermal_mod.calc_RT_coef( iV, eos_d )
Cvpot_a = np.squeeze( genRT_mod.calc_heat_capacity_pot( Tgrid_a, eos_d,
bcoef_a=bcoef_a ) )
Epot_a = np.squeeze( genRT_mod.calc_energy_pot( Tgrid_a, eos_d,
acoef_a=acoef_a,
bcoef_a=bcoef_a ) )
Cvpot_dE_a = (Cvpot_a - np.gradient( Epot_a, dT ))/kboltz
assert np.all( np.abs(Cvpot_dE_a[1:-1]) < TOL ), \
'Cvpot must match numerical energy deriv'
Spot_a = np.squeeze( genRT_mod.calc_entropy_pot( Tgrid_a, eos_d,
bcoef_a=bcoef_a ) )
Cvpot_dS_a = ( Cvpot_a - Tgrid_a*np.gradient( Spot_a, dT ) )/kboltz
assert np.all( np.abs(Cvpot_dS_a[1:-1]) < TOL ), \
'Cvpot must match numerical entropy deriv'
Fpot_a = Epot_a-Tgrid_a*Spot_a
Spot_dF_err_a = ( Spot_a + np.gradient( Fpot_a, dT ) )/kboltz
assert np.all( np.abs(Spot_dF_err_a[1:-1]) < TOL ), \
'Spot must match numerical free energy deriv'
def test_total_entropy(self):
Nsamp = 1001
eos_d = self.init_params({})
eos_d['param_d']['E0'] = -21.3
eos_d['param_d']['dE0th'] = 0.5
V0 = eos_d['param_d']['V0']
Vgrid_a = V0*np.arange(0.4,1.11,0.1)
Tgrid_a = np.linspace( 2500, 5000, Nsamp)
dT = Tgrid_a[1]-Tgrid_a[0]
kboltz = eos_d['const_d']['kboltz']
# Test entropy
TOL = 1e-4
iV = Vgrid_a[0]
genRT_mod = models.GenRosenfeldTaranzona()
thermal_mod = eos_d['modtype_d']['ThermalMod']
full_mod = eos_d['modtype_d']['FullMod']
# verify total entropy
iFtot = np.squeeze( full_mod.free_energy( Vgrid_a[0], Tgrid_a, eos_d ) )
iStot = np.squeeze( full_mod.entropy( Vgrid_a[0], Tgrid_a, eos_d ) )
iSnum = -np.gradient( iFtot, dT )
Stot_dF_err_a = ( iStot - iSnum )/kboltz
assert np.all( np.abs(Stot_dF_err_a[1:-1]) < TOL ), \
'Spot must match numerical free energy deriv'
#====================================================================
class TestRosenfeldTaranzonaPerturbExpand(TestRosenfeldTaranzonaPerturb):
def load_compress_path_mod(self, eos_d):
S0, = core.get_params(['S0'],eos_d)
expand_adj_mod=models.Tait()
compress_path_mod = models.Vinet(path_const='S',level_const=S0,
supress_energy=False,
supress_press=False,
expand_adj_mod=expand_adj_mod)
core.set_modtypes( ['CompressPathMod'], [compress_path_mod], eos_d )
pass
def init_params(self,eos_d):
core.set_consts( [], [], eos_d )
# EOS Parameter values initially set by Mosenfelder2009
# Set model parameter values
mass_avg = (24.31+28.09+3*16.0)/5.0 # g/(mol atom)
T0 = 1673.0
S0 = 0.0 # must adjust
param_key_a = ['T0','S0','mass_avg']
param_val_a = np.array([T0,S0,mass_avg])
core.set_params( param_key_a, param_val_a, eos_d )
V0 = (38.575*1e-5)*mass_avg/eos_d['const_d']['Nmol']/1e3*1e30 # ang^3/atom
K0 = 20.8
KP0= 10.2
KP20 = -2.86 # Not actually used!
E0 = 0.0
param_key_a = ['V0','K0','KP0','KP20','E0']
param_val_a = np.array([V0,K0,KP0,KP20,E0])
core.set_params( param_key_a, param_val_a, eos_d )
VR = V0
gammaR = 0.46
qR = -1.35
param_key_a = ['VR','gammaR','qR']
param_val_a = np.array([VR,gammaR,qR])
core.set_params( param_key_a, param_val_a, eos_d )
dE0th = +1.0
dV0th = -0.02
dK0th = +0.1
dKP0th = -0.00
dKP20th = +1.0
# dE0th = +0.4
# dV0th = -0.0
# dK0th = -0.01
# dKP0th = -0.03
lognfac = 0.0
mexp = 3.0/5
param_key_a = ['dE0th','dV0th','dK0th','dKP0th','dKP20th','lognfac','mexp']
param_val_a = np.array([dE0th,dV0th,dK0th,dKP0th,dKP20th,lognfac,mexp])
core.set_params( param_key_a, param_val_a, eos_d )
# Must convert energy units from kJ/g to eV/atom
energy_conv_fac = mass_avg/eos_d['const_d']['kJ_molpereV']
core.set_consts( ['energy_conv_fac'], [energy_conv_fac],
eos_d )
self.load_eos_mod( eos_d )
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
return eos_d
def test_energy_curves_Spera2011_exp(self):
Nsamp = 101
eos_d = self.init_params({})
param_d = eos_d['param_d']
Vgrid_a = np.linspace(0.4,1.1,Nsamp)*param_d['V0']
Tgrid_a = np.array([2500,3000,3500,4000,4500,5000])
full_mod = eos_d['modtype_d']['FullMod']
compress_path_mod = eos_d['modtype_d']['CompressPathMod']
thermal_mod = eos_d['modtype_d']['ThermalMod']
# energy_conv_fac, = core.get_consts(['energy_conv_fac'],eos_d)
energy_mod_a = []
press_mod_a = []
for iT in Tgrid_a:
ienergy_a = full_mod.energy(Vgrid_a,iT,eos_d)
ipress_a = full_mod.press(Vgrid_a,iT,eos_d)
energy_mod_a.append(ienergy_a)
press_mod_a.append(ipress_a)
# energy_mod_a = np.array( energy_mod_a )
energy_mod_a = np.array( energy_mod_a )
press_mod_a = np.array( press_mod_a )
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
cmap=plt.get_cmap('coolwarm')
col_a = cmap(1.0*(Tgrid_a-Tgrid_a[0])/np.ptp(Tgrid_a))[:,:3]
plt.ion()
plt.figure()
[plt.plot(ipress_a, ienergy_a,'-',color=icol_a,label=iT) \
for ipress_a,ienergy_a,icol_a,iT in zip(press_mod_a,energy_mod_a,col_a,Tgrid_a)]
ax = plt.axes()
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1],labels[::-1],loc='upper left')
plt.xlim(-5,165)
ybnd = [np.min(energy_mod_a[press_mod_a<165]), np.max(energy_mod_a[press_mod_a<165])]
plt.ylim(ybnd[0],ybnd[1])
# plt.ylim(-100.5,-92)
print('Compare this plot with Spera2011 Fig 2b (Oganov potential):')
print('Do the figures agree (y/n or k for keyboard)?')
s = input('--> ')
if s=='k':
from IPython import embed; embed(); import ipdb; ipdb.set_trace()
assert s=='y', 'Figure must match published figure'
pass
#====================================================================
|
mit
|