repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---
APMonitor/arduino | 5_Moving_Horizon_Estimation/2nd_order_nonlinear/Python_GEKKO/tclab_mhe_2nd_order_semi-empirical.py | 1 | 7987 | import numpy as np
import time
import matplotlib.pyplot as plt
import random
# get gekko package with:
# pip install gekko
from gekko import GEKKO
# get tclab package with:
# pip install tclab
from tclab import TCLab
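# Note: if no Arduino is attached, recent releases of the tclab package also
# ship a software emulator (e.g. `from tclab import TCLabModel as TCLab`),
# which can stand in for the hardware below; check your installed tclab version.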
# Connect to Arduino
a = TCLab()
# Final time
tf = 10 # min
# number of data points (1 pt every 3 seconds)
n = tf * 20 + 1
# Configure heater levels
# Percent Heater (0-100%)
Q1s = np.zeros(n)
Q2s = np.zeros(n)
# Heater steps every 50 cycles (150 sec with the 3-second sample time)
# Alternate steps between Q1 and Q2
Q1s[3:] = 100.0
Q1s[50:] = 0.0
Q1s[100:] = 80.0
Q2s[25:] = 60.0
Q2s[75:] = 100.0
Q2s[125:] = 25.0
# rapid random steps every 10 cycles (30 sec), alternating Q1 and Q2, with levels drawn uniformly between 0 and 100
for i in range(130,180):
if i%10==0:
Q1s[i:i+10] = random.random() * 100.0
if (i+5)%10==0:
Q2s[i:i+10] = random.random() * 100.0
# Record initial temperatures (degC)
T1m = a.T1 * np.ones(n)
T2m = a.T2 * np.ones(n)
# Store MHE values for plots
Tmhe1 = T1m[0] * np.ones(n)
Tmhe2 = T2m[0] * np.ones(n)
Umhe = 10.0 * np.ones(n)
taumhe = 5.0 * np.ones(n)
amhe1 = 0.01 * np.ones(n)
amhe2 = 0.0075 * np.ones(n)
#########################################################
# Initialize Model as Estimator
#########################################################
# Use remote=False for local solve (Windows, Linux, ARM)
# remote=True for remote solve (All platforms)
m = GEKKO(name='tclab-mhe',remote=False)
# 60 second time horizon, 20 steps
m.time = np.linspace(0,60,21)
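# Quick reference for the GEKKO options used on the variables below:
#   STATUS  = 1 lets the estimator adjust the value (0 = hold it fixed)
#   FSTATUS = 1 feeds measurements into the variable (0 = ignore measurements)
#   DMAX    caps how far the value may move between successive solves
#   LOWER / UPPER bound the estimate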
# Parameters to Estimate
U = m.FV(value=10,name='u')
U.STATUS = 0 # don't estimate initially
U.FSTATUS = 0 # no measurements
U.DMAX = 1
U.LOWER = 5
U.UPPER = 15
tau = m.FV(value=20,name='tau')
tau.STATUS = 0 # don't estimate initially
tau.FSTATUS = 0 # no measurements
tau.DMAX = 1
tau.LOWER = 15
tau.UPPER = 25
alpha1 = m.FV(value=0.01,name='a1') # W / % heater
alpha1.STATUS = 0 # don't estimate initially
alpha1.FSTATUS = 0 # no measurements
alpha1.DMAX = 0.001
alpha1.LOWER = 0.003
alpha1.UPPER = 0.03
alpha2 = m.FV(value=0.0075,name='a2') # W / % heater
alpha2.STATUS = 0 # don't estimate initially
alpha2.FSTATUS = 0 # no measurements
alpha2.DMAX = 0.001
alpha2.LOWER = 0.002
alpha2.UPPER = 0.02
# Measured inputs
Q1 = m.MV(value=0,name='q1')
Q1.STATUS = 0 # don't estimate
Q1.FSTATUS = 1 # receive measurement
Q2 = m.MV(value=0,name='q2')
Q2.STATUS = 0 # don't estimate
Q2.FSTATUS = 1 # receive measurement
# State variables
TH1 = m.SV(value=T1m[0],name='th1')
TH2 = m.SV(value=T2m[0],name='th2')
# Measurements for model alignment
TC1 = m.CV(value=T1m[0],name='tc1')
TC1.STATUS = 1 # minimize error between simulation and measurement
TC1.FSTATUS = 1 # receive measurement
TC1.MEAS_GAP = 0.1 # measurement deadband gap
TC1.LOWER = 0
TC1.UPPER = 200
TC2 = m.CV(value=T2m[0],name='tc2')
TC2.STATUS = 1 # minimize error between simulation and measurement
TC2.FSTATUS = 1 # receive measurement
TC2.MEAS_GAP = 0.1 # measurement deadband gap
TC2.LOWER = 0
TC2.UPPER = 200
Ta = m.Param(value=23.0+273.15) # K
mass = m.Param(value=4.0/1000.0) # kg
Cp = m.Param(value=0.5*1000.0) # J/kg-K
A = m.Param(value=10.0/100.0**2) # Area facing the surroundings (m^2)
As = m.Param(value=2.0/100.0**2) # Shared area between the two heaters (m^2)
eps = m.Param(value=0.9) # Emissivity
sigma = m.Const(5.67e-8) # Stefan-Boltzmann
# Heater temperatures
T1 = m.Intermediate(TH1+273.15)
T2 = m.Intermediate(TH2+273.15)
# Heat transfer between two heaters
Q_C12 = m.Intermediate(U*As*(T2-T1)) # Convective
Q_R12 = m.Intermediate(eps*sigma*As*(T2**4-T1**4)) # Radiative
# Semi-fundamental correlations (energy balances)
m.Equation(TH1.dt() == (1.0/(mass*Cp))*(U*A*(Ta-T1) \
+ eps * sigma * A * (Ta**4 - T1**4) \
+ Q_C12 + Q_R12 \
+ alpha1*Q1))
m.Equation(TH2.dt() == (1.0/(mass*Cp))*(U*A*(Ta-T2) \
+ eps * sigma * A * (Ta**4 - T2**4) \
- Q_C12 - Q_R12 \
+ alpha2*Q2))
# Empirical correlations (lag equations to emulate conduction)
m.Equation(tau * TC1.dt() == -TC1 + TH1)
m.Equation(tau * TC2.dt() == -TC2 + TH2)
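# Each lag equation above is a first-order filter: the sensor temperature TC
# approaches its heater temperature TH with time constant tau (seconds), a
# simple empirical stand-in for conduction from the heater to the thermistor.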
# Global Options
m.options.IMODE = 5 # MHE
m.options.EV_TYPE = 2 # Estimator objective: 2 = squared error, 1 = l1-norm
m.options.NODES = 3 # Collocation nodes
m.options.SOLVER = 3 # IPOPT
m.options.COLDSTART = 1 # COLDSTART on first cycle
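# With FSTATUS=1 on Q1, Q2, TC1 and TC2, each m.solve() call in the loop below
# slides the 60-second horizon forward and re-fits the model to the latest
# measurements; U, tau, alpha1 and alpha2 start being adjusted once their
# STATUS flags are switched on at cycle 10.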
##################################################################
# Create plot
plt.figure(figsize=(10,7))
plt.ion()
plt.show()
# Main Loop
start_time = time.time()
prev_time = start_time
tm = np.zeros(n)
try:
for i in range(1,n):
# Sleep time
sleep_max = 3.0
sleep = sleep_max - (time.time() - prev_time)
if sleep>=0.01:
time.sleep(sleep-0.01)
else:
time.sleep(0.01)
# Record time and change in time
t = time.time()
dt = t - prev_time
prev_time = t
tm[i] = t - start_time
# Read temperatures in Celsius
T1m[i] = a.T1
T2m[i] = a.T2
# Insert measurements
TC1.MEAS = T1m[i]
TC2.MEAS = T2m[i]
Q1.MEAS = Q1s[i-1]
Q2.MEAS = Q2s[i-1]
# Start estimating U, tau, alpha1, alpha2 after 10 cycles (30 sec)
if i==10:
U.STATUS = 1
tau.STATUS = 1
alpha1.STATUS = 1
alpha2.STATUS = 1
# Predict Parameters and Temperatures with MHE
m.solve()
if m.options.APPSTATUS == 1:
# Retrieve new values
Tmhe1[i] = TC1.MODEL
Tmhe2[i] = TC2.MODEL
Umhe[i] = U.NEWVAL
taumhe[i] = tau.NEWVAL
amhe1[i] = alpha1.NEWVAL
amhe2[i] = alpha2.NEWVAL
else:
# Solution failed, copy prior solution
Tmhe1[i] = Tmhe1[i-1]
Tmhe2[i] = Tmhe2[i-1]
Umhe[i] = Umhe[i-1]
taumhe[i] = taumhe[i-1]
amhe1[i] = amhe1[i-1]
amhe2[i] = amhe2[i-1]
# Write new heater values (0-100)
a.Q1(Q1s[i])
a.Q2(Q2s[i])
# Plot
plt.clf()
ax=plt.subplot(3,1,1)
ax.grid()
plt.plot(tm[0:i],T1m[0:i],'ro',label=r'$T_1$ measured')
plt.plot(tm[0:i],Tmhe1[0:i],'k-',label=r'$T_1$ MHE')
plt.plot(tm[0:i],T2m[0:i],'bx',label=r'$T_2$ measured')
plt.plot(tm[0:i],Tmhe2[0:i],'k--',label=r'$T_2$ MHE')
plt.ylabel('Temperature (degC)')
plt.legend(loc=2)
ax=plt.subplot(3,1,2)
ax.grid()
plt.plot(tm[0:i],Umhe[0:i],'k-',label='Heat Transfer Coeff')
plt.plot(tm[0:i],taumhe[0:i],'g:',label='Time Constant')
plt.plot(tm[0:i],amhe1[0:i]*1000,'r--',label=r'$\alpha_1$x1000')
plt.plot(tm[0:i],amhe2[0:i]*1000,'b--',label=r'$\alpha_2$x1000')
plt.ylabel('Parameters')
plt.legend(loc='best')
ax=plt.subplot(3,1,3)
ax.grid()
plt.plot(tm[0:i],Q1s[0:i],'r-',label=r'$Q_1$')
plt.plot(tm[0:i],Q2s[0:i],'b:',label=r'$Q_2$')
plt.ylabel('Heaters')
plt.xlabel('Time (sec)')
plt.legend(loc='best')
plt.draw()
plt.pause(0.05)
# Turn off heaters
a.Q1(0)
a.Q2(0)
# Save figure
plt.savefig('tclab_mhe.png')
# Allow user to end loop with Ctrl-C
except KeyboardInterrupt:
# Disconnect from Arduino
a.Q1(0)
a.Q2(0)
print('Shutting down')
a.close()
plt.savefig('tclab_mhe.png')
# Make sure serial connection still closes when there's an error
except:
# Disconnect from Arduino
a.Q1(0)
a.Q2(0)
print('Error: Shutting down')
a.close()
plt.savefig('tclab_mhe.png')
raise
| apache-2.0 |
glewis17/cvxpy | doc/sphinxext/docscrape_sphinx.py | 154 | 7759 | import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
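# The immediate return below suppresses the signature section entirely,
# so the conditional that follows is never reached.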
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc,8)
out += ['']
return out
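# For reference, a numpydoc entry like "x : int" with description "A value."
# comes out of _str_param_list roughly as:
#   :Parameters:
#
#       **x** : int
#
#           A value.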
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
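# Rough usage sketch (object chosen for illustration only):
#   from docscrape_sphinx import get_doc_object
#   import numpy as np
#   print(get_doc_object(np.mean))   # numpydoc docstring rendered as Sphinx reST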
| gpl-3.0 |
Aryan-Barbarian/bigbang | bigbang/git_repo.py | 2 | 5894 | from git import *
import git
import pandas as pd
import numpy as np
from time import mktime
from datetime import datetime
from entity_resolution import entity_resolve
import networkx as nx
from config.config import CONFIG
ALL_ATTRIBUTES = CONFIG.all_attributes #["HEXSHA", "Committer Name", "Committer Email", "Commit Message", "Time", "Parent Commit", "Touched File"]
def cache_fixer(r): # Normalize a cached row: parse the touched-file list and the timestamp
r["Touched File"] = [x.strip() for x in r["Touched File"][1:-1].split(",")]
r["Time"] = pd.to_datetime(r["Time"], unit = "s");
return r
"""
Class that stores an instance of a git repository given the address to that
repo relative to this file. It returns the data in multiple useful forms.
"""
class GitRepo(object):
""" A pandas DataFrame object indexed by time that stores
the raw form of the repo's commit data as a table where
each row is a commit and each col represents an attribute
of that commit (time, message, commiter name, committer email,
commit hexsha)
"""
def __init__(self, name, url=None, attribs = ALL_ATTRIBUTES, cache=None):
self._commit_data = None;
self.url = url;
self.repo = None
self.name = name;
if cache is None:
self.repo = Repo(url)
self.populate_data(ALL_ATTRIBUTES)
else:
cache = cache.apply(cache_fixer, axis=1)
cache = cache.set_index(cache["Time"])
self._commit_data = cache;
missing = list();
cols = self.commit_data.columns
for attr in attribs:
if attr not in cols and unicode(attr) not in cols:
missing.append(attr);
if len(missing) > 0:
print("There were " + str(len(missing)) + " missing attributes: ")
print(missing);
if ("Committer Name" in attribs and "Committer Email" in attribs):
self._commit_data["Person-ID"] = None;
self._commit_data = self._commit_data.apply(lambda row: entity_resolve(row, "Committer Email", "Committer Name"), axis=1)
def gen_data(self, repo, raw):
if not repo.active_branch.is_valid():
print("Found an empty repo: " + str(self.name))
return;
first = repo.commit()
commit = first
firstHexSha = first.hexsha;
generator = git.Commit.iter_items(repo, firstHexSha);
if "Touched File" in raw:
print("WARNING: Currently going through file diffs. This will take a very long time (1 minute per 3000 commits.) We suggest using a small repository.")
for commit in generator:
try:
if "Touched File" in raw:
diff_list = list();
for diff in commit.diff(commit.parents[0]):
if diff.b_blob:
diff_list.append(diff.b_blob.path);
else:
diff_list.append(diff.a_blob.path);
raw["Touched File"].append(diff_list)
if "Committer Name" in raw:
raw["Committer Name"].append(commit.committer.name)
if "Committer Email" in raw:
raw["Committer Email"].append(commit.committer.email)
if "Commit Message" in raw:
raw["Commit Message"].append(commit.message)
if "Time" in raw or True: # TODO: For now, we always ask for the time
raw["Time"].append(pd.to_datetime(commit.committed_date, unit = "s"));
if "Parent Commit" in raw:
raw["Parent Commit"].append([par.hexsha for par in commit.parents])
if "HEXSHA" in raw:
raw["HEXSHA"].append(commit.hexsha)
except LookupError:
print("failed to add a commit because of an encoding error")
def populate_data(self, attribs = ALL_ATTRIBUTES):
raw = dict()
for attrib in attribs:
raw[attrib] = list();
repo = self.repo
self.gen_data(repo, raw);
print(type(raw["Time"]))
# TODO: NEEDS TIME
time_index = pd.DatetimeIndex(raw["Time"], periods = 24, freq = "H")
self._commit_data = pd.DataFrame(raw, index = time_index);
def by_committer(self):
return self.commit_data.groupby('Committer Name').size().order()
def commits_per_day(self):
ans = self.commit_data.groupby(self.commit_data.index).size()
ans = ans.resample("D", how=np.sum)
return ans;
def commits_per_week(self):
ans = self.commits_per_day();
ans = ans.resample("W", how=np.sum)
return ans;
def commits_per_day_full(self):
ans = self.commit_data.groupby([self.commit_data.index, "Committer Name" ]).size()
return ans;
@property
def commit_data(self):
return self._commit_data;
def commits_for_committer(self, committer_name):
full_info = self.commit_data
time_index = pd.DatetimeIndex(self.commit_data["Time"], periods = 24, freq = "H");
df = full_info.loc[full_info["Committer Name"] == committer_name]
df = df.groupby([df.index]).size()
df = df.resample("D", how = np.sum, axis = 0)
return df
def merge_with_repo(self, other):
# TODO: What if commits have the same time?
self._commit_data = self.commit_data.append(other.commit_data);
class MultiGitRepo(GitRepo):
"""
Repos must have a "Repo Name" column
"""
def __init__(self, repos, attribs=ALL_ATTRIBUTES):
self._commit_data = repos[0].commit_data.copy(deep=True);
for i in range(1, len(repos)):
self.merge_with_repo(repos[i]);
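# Rough usage sketch (paths and names are placeholders):
#   repo = GitRepo("bigbang", url="path/to/local/clone")
#   repo.commits_per_week()                  # weekly commit counts as a pandas Series
#   repo.commits_for_committer("Jane Doe")   # daily commit counts for one author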
| gpl-2.0 |
vivekmishra1991/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 176 | 12155 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
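# Convention used throughout these tests: DBSCAN labels noise samples as -1,
# so "len(set(labels)) - (1 if -1 in labels else 0)" counts only real clusters.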
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
krez13/scikit-learn | sklearn/ensemble/tests/test_forest.py | 26 | 41675 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
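# Note: the "def test_xxx(): yield check_xxx, name" pattern used below relies on
# nose-style generator tests; each yielded tuple is run as check_xxx(name).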
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, est.transform, X, threshold="mean")
assert_greater(X_new.shape[1], 0)
assert_less(X_new.shape[1], X.shape[1])
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
importances_parallel = est.feature_importances_
assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
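# mdi_importance below evaluates the closed-form importance of Louppe et al.
# (2013) for totally randomized trees:
#   Imp(X_m) = sum_{k=0}^{p-1} 1/(C(p,k) * (p-k))
#              * sum_{B subset of V\{X_m}, |B|=k} I(X_m ; Y | B)
# with the conditional mutual information I(X_m; Y | B) expanded over every
# realization B = b, weighted by P(B = b).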
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
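# (Sketch of the count: with a uniform random cut on a feature spanning
#  {0, 1, 2, 3}, the root threshold falls in (0, 1), (1, 2) or (2, 3) with
#  probability 1/3 each; a root cut in (1, 2) fully determines both children,
#  giving the compact tree probability 1/3, while a root cut in (0, 1) or
#  (2, 3) leaves two equally likely follow-up splits, giving 1/6 for each of
#  the other four shapes.)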
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = rng.randint(0, 2, 1000)
X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
# Test that class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing, returns the
# same forest, and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaf indices are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
| bsd-3-clause |
jhaux/tensorflow | tensorflow/examples/learn/mnist.py | 45 | 3999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows the description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_model(feature, target, mode):
"""2-layer convolution model."""
# Convert the target to a one-hot tensor of shape (batch_size, 10),
# with an on-value of 1 for each one-hot vector of length 10.
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
# Reshape feature to a 4d tensor with the 2nd and 3rd dimensions being
# image width and height, and the final dimension being the number of color channels.
feature = tf.reshape(feature, [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = layers.convolution2d(
feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = layers.convolution2d(
h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(
h_pool2_flat, 1024, activation_fn=tf.nn.relu),
keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
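# Shape check (added for clarity; the numbers follow directly from the layer
# definitions above and are not part of the original example): each 28x28
# input is halved twice by the 2x2 max-pools,
#   input:        [-1, 28, 28, 1]
#   conv1 + pool: [-1, 14, 14, 32]
#   conv2 + pool: [-1,  7,  7, 64]
# which is why the flattening step reshapes to 7 * 7 * 64 = 3136 features
# per image before the densely connected layer.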
def main(unused_args):
### Download and load MNIST dataset.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images,
mnist.train.labels.astype(np.int32),
batch_size=100,
steps=1000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
classifier.fit(mnist.train.images,
mnist.train.labels,
batch_size=100,
steps=20000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
yanlend/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
joshloyal/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 24 | 2080 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
dfdx/masque | others/faceless-python-old/faceless/lucaskanade.py | 1 | 7321 |
from PIL import Image
from scipy.ndimage.interpolation import affine_transform
from numpy import *
from matplotlib import pylab as plt
from matplotlib import gridspec
# nabla_Ix = array([[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]])
# nabla_Iy = array([[1, 1, 3, 3], [1, 1, 3, 3], [1, 1, 3, 3]])
# im_grad = (nabla_Ix, nabla_Iy)
# w, h = (4, 3)
N_p = 6
def imgrad(im):
"""[nabla(I_x), nabla(I_y)]"""
if len(im.shape) != 2:
raise Exception("Can work only with grayscale images")
grad = [g.astype(int32) for g in gradient(im.astype(int32))]
grad.reverse()
return grad
def flatten_params(A, b):
M = hstack([A, b.reshape((b.size, 1))])
return M.flatten()
def structure_params(p):
p = p.reshape(2, 3)
return p[:, 0:2], p[:, -1]
def interp_im(im, y, x):
x = asarray(x)
y = asarray(y)
x0 = floor(x).astype(int)
x1 = x0 + 1
y0 = floor(y).astype(int)
y1 = y0 + 1
x0 = clip(x0, 0, im.shape[1]-1);
x1 = clip(x1, 0, im.shape[1]-1);
y0 = clip(y0, 0, im.shape[0]-1);
y1 = clip(y1, 0, im.shape[0]-1);
Ia = im[ y0, x0 ]
Ib = im[ y1, x0 ]
Ic = im[ y0, x1 ]
Id = im[ y1, x1 ]
wa = (x1-x) * (y1-y)
wb = (x1-x) * (y-y0)
wc = (x-x0) * (y1-y)
wd = (x-x0) * (y-y0)
return wa*Ia + wb*Ib + wc*Ic + wd*Id
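# Worked micro-example (added, not part of the original file): for
#   im = array([[0, 1],
#               [2, 3]])
# interp_im(im, 0.5, 0.5) bilinearly weights the four neighbours with
# wa = wb = wc = wd = 0.25 and returns 0.25 * (0 + 2 + 1 + 3) = 1.5.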
# TODO: Visualize!
def quadtobox(im, dst, M):
# Dimensions of destination image - integers, assume rectangle
minv = amin(dst.T, axis=0)
maxv = amax(dst.T, axis=0)
# xg, yg = meshgrid(range(maxv[0] + 1), range(maxv[1] + 1))
xg, yg = meshgrid(range(minv[0], maxv[0]), range(minv[1], maxv[1]))
xy = vstack([xg.T.flatten(), yg.T.flatten()])
xy = vstack([xy, ones((1, xy.shape[1]))])
# Transform into source
uv = dot(M, xy)
# Remove homogeneous
uv = uv[0:2,:].T
# Sample
xi = uv[:, 0].reshape((maxv[0] - minv[0], maxv[1] - minv[1])).T
yi = uv[:, 1].reshape((maxv[0] - minv[0], maxv[1] - minv[1])).T
wimg = interp_im(im, yi, xi)
return wimg
def warp_a(im, p, dst):
p = asarray(p).reshape(2, 3)
M = vstack([p, [0, 0, 1]])
M[0, 0] += 1
M[1, 1] += 1
wimg = quadtobox(im, dst, M)
return wimg
def jacobian(nx, ny):
jac_x = kron(array([range(0, nx)]), ones((ny, 1)))
jac_y = kron(array([range(0, ny)]).T, ones((1, nx)))
jac_zero = zeros((ny, nx))
jac_one = ones((ny, nx))
row_1 = hstack([jac_x, jac_zero, jac_y, jac_zero, jac_one, jac_zero])
row_2 = hstack([jac_zero, jac_x, jac_zero, jac_y, jac_zero, jac_one])
dW_dp = vstack([row_1, row_2])
return dW_dp
def sd_images(dW_dp, im_grad, N_p, h, w):
nabla_Ix, nabla_Iy = im_grad # TODO: swap axes
VI_dW_dp = zeros((h, w * N_p))
for p in range(0, N_p):
Tx = nabla_Ix * dW_dp[0:h, p * w : p * w + w]
Ty = nabla_Iy * dW_dp[h:, p * w : p * w + w]
VI_dW_dp[:, p * w : p * w + w] = Tx + Ty
return VI_dW_dp
def sd_update(VI_dW_dp, error_im, N_p, w):
sd_delta_p = zeros((N_p, 1))
for p in range(N_p):
h1 = VI_dW_dp[:, p*w : p*w + w]
sd_delta_p[p] = sum(h1 * error_im)
return sd_delta_p
def hessian(VI_dW_dp, N_p, w):
H = zeros((N_p, N_p))
for i in range(N_p):
h1 = VI_dW_dp[:, i*w : i*w + w]
for j in range(N_p):
h2 = VI_dW_dp[:, j*w : j*w + w]
H[i, j] = sum(h1 * h2)
return H
def update_step(p, delta_p):
p = p.reshape(2, 3)
delta_p = delta_p.reshape((2, 3))
# print '[0] p =', p
# print '[1] delta_p = ', delta_p
delta_M = vstack([delta_p, array([0, 0, 1])])
delta_M[0, 0] = delta_M[0, 0] + 1
delta_M[1, 1] = delta_M[1, 1] + 1
# print '[2] delta_M =', delta_M
delta_M = linalg.inv(delta_M)
# print '[3] inv(delta_M) =', delta_M
warp_M = vstack([p, array([0, 0, 1])])
warp_M[0, 0] += 1
warp_M[1, 1] += 1
comp_M = dot(warp_M, delta_M)
# print '[4] comp_M =', comp_M
p = comp_M[0:2, :]
p[0, 0] -= 1
p[1, 1] -= 1
return p.flatten()
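# Note (added): this is the inverse compositional update from the
# Lucas-Kanade framework of Baker & Matthews, W(x; p) <- W(x; p) o W(x; delta_p)^-1,
# implemented above by building the 3x3 homogeneous matrices, inverting
# delta_M, composing comp_M = warp_M . inv(delta_M), and reading the
# updated parameters back off comp_M.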
def inv_comp(im, tmpl, n_iter=10, p_init=zeros((6,))):
"""Applies inverse compositional approach to aligning im to tmpl.
Estimates vector of parameters p = [p_1, p_2, p_3, p_4, p_5, p_6]"""
im = im.astype(int64)
tmpl = tmpl.astype(int64)
h, w = tmpl.shape
# tmpl_pts = array([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).T
tmpl_pts = ij2xy(array([[90, 260], [90, 530], [400, 530], [400, 260]])).T
nabla_T = imgrad(tmpl)
dW_dp = jacobian(w, h)
VT_dW_dp = sd_images(dW_dp, nabla_T, N_p, h, w)
# show_sd_images(VT_dW_dp, w)
# import time; time.sleep(5)
H = hessian(VT_dW_dp, N_p, w)
H_inv = linalg.inv(H)
warp_p = p_init.copy()
fit_err = []
for i in range(n_iter):
print 'iteration %s' % i
IWxp = warp_a(im, warp_p, tmpl_pts)
plot_imgs([im, IWxp], ratios=[2, 1])
plt.show()
error_im = IWxp - tmpl
fit_err.append(sqrt(mean(error_im * error_im)))
print "MSE: ", fit_err[-1]
sd_delta_p = sd_update(VT_dW_dp, error_im, N_p, w)
delta_p = dot(H_inv, sd_delta_p)
warp_p = update_step(warp_p, delta_p)
return warp_p
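# Usage sketch (added; mirrors test_inv_comp() below): given a grayscale
# image `im` and a template `tmpl` cut from it,
#   warp_p = inv_comp(im, tmpl, n_iter=10)
# returns the six affine parameters aligning im to tmpl; fit_err tracks the
# RMS error of the warped image against the template at each iteration.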
######### REPL Helpers ############
def show(im, gray=True):
plt.figure()
if gray:
plt.gray()
plt.imshow(im)
plt.show()
def show_pil(im, gray=None):
Image.fromarray(uint8(im)).show()
def show_sd_images(sd_imgs, w):
for i in xrange(6):
show_pil(sd_imgs[:, i*w : (i + 1)*w])
def add_rect(i, j, h, w):
plt.gca().add_patch(plt.Rectangle((j, i), w, h, fill=False))
def plot_imgs(imgs, ratios=[1, 1]):
plt.gray()
gs = gridspec.GridSpec(1, len(imgs), width_ratios=ratios)
for i in range(len(imgs)):
plt.subplot(gs[i])
plt.imshow(imgs[i])
return gs
######## Test Scenarios ###########
face_dst = array([[90, 260], [90, 530], [400, 530], [400, 260]])
def test_warp_a():
im = array(Image.open('face.bmp').convert('L'))
dst = face_dst
p = array([0, 0, 0, 0, 0, 0])
def test_inv_comp(p_real=[0, .1, .1, 0, 0, 0], n_iter=10):
im = asarray(Image.open('face.bmp').convert('L'))
imh, imw = im.shape
dst = array([[90, 260], [90, 530], [400, 530], [400, 260]])
i0, j0 = dst.min(axis=0)
i1, j1 = dst.max(axis=0)
# tmpl = im[i0:i1, j0:j1]
tmpl = warp_a(im, p_real, ij2xy(dst).T)
return inv_comp(im, tmpl, n_iter)
def test_rect():
im = array(Image.open('face.bmp').convert('L'))
pts = array([[90, 260], [90, 530], [400, 530], [400, 260]])
i0, j0 = pts.min(axis=0)
i1, j1 = pts.max(axis=0)
plt.figure()
plt.subplot(1, 2, 1)
plt.gray()
plt.imshow(im)
add_rect(i0, j0, i1 - i0, j1 - j0)
plt.subplot(1, 2, 2)
box = im[i0:i1, j0:j1]
plt.imshow(box)
plt.show()
def test_rect2():
im = array(Image.open('face.bmp').convert('L'))
pts = array([[90, 260], [90, 530], [400, 530], [400, 260]])
i0, j0 = pts.min(axis=0)
i1, j1 = pts.max(axis=0)
box = im[i0:i1, j0:j1]
gs = plot_imgs([im, box], ratios=[2, 1])
# plt.subplot(gs[0])
# add_rect(i0, j0, i1 - i0, j1 - j0)
plt.show()
return
| mit |
bryangraham/ipt | ipt/att.py | 1 | 39159 | # Load library dependencies
import numpy as np
import numpy.linalg
import scipy as sp
import scipy.optimize
import scipy.stats
import pandas as pd
# Import logit() command in ipt module since att() calls it
from .logit import logit
# Define att() function
#-----------------------------------------------------------------------------#
def att(D, Y, r_W, t_W, study_tilt=True, rlgrz=1, s_wgt=None, nocons=False, c_id=None, silent=False):
"""
AUTHOR: Bryan S. Graham, UC - Berkeley, [email protected]
DATE: Python 2.7 code on 26 May 2016, updated for Python 3.6 on 15 July 2018
This function estimates the average treatment effect on the treated (ATT)
using the "auxiliary-to-study tilting" (AST) method described by
Graham, Pinto and Egel (2016, Journal of Business and Economic Statistics).
The notation below mirrors that in the paper where possible. The Supplemental
Web Appendix of the paper describes the estimation algorithm implemented here
in detail. A copy of the paper and all supplemental appendices can be found
online at http://bryangraham.github.io/econometrics/
INPUTS
------
D : N x 1 pandas.Series with ith element equal to 1 if ith unit in the merged
sample is from the study population and zero if from the auxiliary
population (i.e., D is the "treatment" indicator)
Y : N x 1 pandas.Series of observed outcomes
r_W : r(W), N x 1+L pandas.DataFrame of functions of always observed covariates
-- these are the propensity score basis functions
t_W : t(W), N x 1+M pandas.DataFrame of functions of always observed covariates
-- these are the balancing functions
study_tilt: If True, compute the study sample tilt. This should be set to False
                if all the elements in t(W) are also contained in h(W). In that
                case the study tilt coincides with its empirical measure. This
                measure is returned in the pi_s vector when study_tilt = False.
rlgrz     : Regularization parameter. Should be positive and less than or equal
                to one. Smaller values correspond to less regularization, but
may cause underflow problems when overlap is poor. The default
value will be adequate for most applications.
s_wgt : N x 1 pandas.Series of sampling weights variable (optional)
nocons : If True, then do NOT add constant to h_W and t_W matrix
(only set to True if user passes in dataframes with constants included)
c_id : N X 1 pandas.Series of unique `cluster' id values (assumed to be integer valued) (optional)
NOTE: Default is to assume independent observations and report heteroscedastic robust
standard errors
NOTE: Data are assumed to be pre-sorted by groups.
silent    : if silent = True, display less optimization information and use
lower tolerance levels (optional)
OUTPUTS
-------
gamma_ast : AST estimate of gamma (the ATT)
vcov_gamma_ast : estimated large sample variance of gamma
pscore_tests : list of [study_test, auxiliary_test] where
study_test : ChiSq test statistic of H0 : lambda_s = 0; list with
[statistic, dof, p-val]
NOTE: returns [None, None, None] if study_tilt = False
auxiliary_test : ChiSq test statistic of H0 : lambda_a = 0; list with
[statistic, dof, p-val]
tilts : numpy array with pi_eff, pi_s & pi_a as columns, sorted according
to the input data, and where
pi_eff : Semiparametrically efficient estimate of F_s(W)
pi_s : Study sample tilt
pi_a : Auxiliary sample tilt
exitflag : 1 = success, 2 = can't compute MLE of p-score, 3 = can't compute study/treated tilt,
4 = can't compute auxiliary/control tilt
FUNCTIONS CALLED : logit() (...logit_logl(), logit_score(), logit_hess()...)
---------------- ast_crit(), ast_foc(), ast_soc() (...ast_phi()...)
"""
def ast_phi(lmbda, t_W, p_W_index, NQ, rlgrz):
"""
This function evaluates the regularized phi(v) function for
the logit propensity score case (as well as its first and
second derivatives) as described in the Supplemental
Web Appendix of Graham, Pinto and Egel (2016, JBES).
INPUTS
------
lmbda : vector of tilting parameters
t_W : vector of balancing moments
p_W_index : index of estimated logit propensity score
NQ : sample size times the marginal probability of missingness
rlgrz : Regularization parameter. See discussion in main header.
OUTPUTS
-------
phi, phi1, phi2 : N x 1 vectors with elements phi(p_W_index + lmbda't_W)
and its first and second derivatives w.r.t to
v = p_W_index + lmbda't_W
"""
# Adjust the NQ cut-off value used for quadratic extrapolation according
# to the user-defined rlgrz parameter
NQ = NQ*rlgrz
# Coefficients on quadratic extrapolation of phi(v) used to regularize
# the problem
c = -(NQ - 1)
b = NQ + (NQ - 1)*np.log(1/(NQ - 1))
a = -(NQ - 1)*(1 + np.log(1/(NQ - 1)) + 0.5*(np.log(1/(NQ - 1)))**2)
v_star = np.log(1/(NQ - 1))
# Evaluation of phi(v) and derivatives
v = p_W_index + t_W @ lmbda
phi = (v>v_star) * (v - np.exp(-v)) + (v<=v_star) * (a + b*v + 0.5*c*v**2)
phi1 = (v>v_star) * (1 + np.exp(-v)) + (v<=v_star) * (b + c*v)
phi2 = (v>v_star) * ( - np.exp(-v)) + (v<=v_star) * c
return [phi, phi1, phi2]
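    # Note (added): the coefficients a, b and c above are chosen so that the
    # quadratic branch a + b*v + 0.5*c*v**2 matches the value, first and second
    # derivative of phi(v) = v - exp(-v) at the cut-off v_star = log(1/(NQ-1)),
    # where exp(-v_star) = NQ - 1. Below v_star the quadratic extrapolation
    # regularizes the problem when overlap is poor (see the rlgrz discussion
    # in the main header).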
def ast_crit(lmbda, D, p_W, p_W_index, t_W, NQ, rlgrz, s_wgt):
"""
This function constructs the AST criterion function
as described in Graham, Pinto and Egel (2016, JBES).
INPUTS
------
lmbda : vector of tilting parameters
D : N x 1 treatment indicator vector
p_W : N x 1 MLEs of the propensity score
p_W_index : index of estimated logit propensity score
t_W : vector of balancing moments
NQ : sample size times the marginal probability of missingness
rlgrz : Regularization parameter. See discussion in main header.
s_wgt : N x 1 vector of known sampling weights (optional)
OUTPUTS
-------
crit : AST criterion function at passed parameter values
Functions called : ast_phi()
"""
lmbda = np.reshape(lmbda,(-1,1)) # make lmbda a 2-dimensional object
[phi, phi1, phi2] = ast_phi(lmbda, t_W, p_W_index, NQ, rlgrz) # compute phi and 1st/2nd derivatives
crit = -np.sum(s_wgt * (D * phi - (t_W @ lmbda)) * (p_W / NQ)) # AST criterion (scalar)
return crit
def ast_foc(lmbda, D, p_W, p_W_index, t_W, NQ, rlgrz, s_wgt):
"""
Returns first derivative vector of AST criterion function with respect
to lmbda. See the header for ast_crit() for description of parameters.
"""
lmbda = np.reshape(lmbda,(-1,1)) # make lmbda a 2-dimensional object
[phi, phi1, phi2] = ast_phi(lmbda, t_W, p_W_index, NQ, rlgrz) # compute phi and 1st/2nd derivatives
foc = -(t_W.T @ (s_wgt * (D * phi1 - 1) * (p_W / NQ))) # AST gradient (1+M x 1 vector)
foc = np.ravel(foc) # make foc 1-dimensional numpy array
return foc
def ast_soc(lmbda, D, p_W, p_W_index, t_W, NQ, rlgrz, s_wgt):
"""
Returns hessian matrix of AST criterion function with respect
to lmbda. See the header for ast_crit() for description of parameters.
"""
lmbda = np.reshape(lmbda,(-1,1)) # make lmbda a 2-dimensional object
[phi, phi1, phi2] = ast_phi(lmbda, t_W, p_W_index, NQ, rlgrz) # compute phi and 1st/2nd derivatives
soc = -(((s_wgt * D * phi2 * (p_W / NQ)) * t_W).T @ t_W) # AST hessian (note use of numpy broadcasting rules)
# (1 + M) x (1 + M) "matrix" (numpy array)
return [soc]
def ast_study_callback(lmbda):
print("Value of ast_crit = " + "%.6f" % ast_crit(lmbda, Ds, p_W, p_W_index, t_Ws, NQ, rlgrz, sw) + \
", 2-norm of ast_foc = "+ "%.6f" % numpy.linalg.norm(ast_foc(lmbda, Ds, p_W, p_W_index, t_Ws, \
NQ, rlgrz, sw)))
def ast_auxiliary_callback(lmbda):
print("Value of ast_crit = " + "%.6f" % ast_crit(lmbda, 1-Ds, p_W, -p_W_index, t_Ws, NQ, rlgrz, sw) + \
", 2-norm of ast_foc = "+ "%.6f" % numpy.linalg.norm(ast_foc(lmbda, 1-Ds, p_W, -p_W_index, t_Ws, \
NQ, rlgrz, sw)))
# ----------------------------------------------------------------------------------- #
# - STEP 1 : ORGANIZE DATA - #
# ----------------------------------------------------------------------------------- #
# Extract variable names from pandas data objects
dep_var = Y.name # Get dependent variable names
r_W_names = list(r_W.columns) # Get r_W variable names
t_W_names = list(t_W.columns) # Get t_W variable names
# Create pointers to pandas objects transformed into appropriately sized numpy arrays
Ds = D.values.reshape((-1,1)) # Turn pandas.Series into N x 1 numpy array
Ys = Y.values.reshape((-1,1)) # Turn pandas.Series into N x 1 numpy array
r_Ws = r_W.values # Turn pandas.DataFrame into N x 1 + L numpy array
t_Ws = t_W.values # Turn pandas.DataFrame into N x 1 + M numpy array
# Extract basic information and set-up AST problem
N = len(D) # Number of units in sample
Ns = np.sum(D) # Number of study units in the sample (treated units)
Na = N-Ns # Number of auxiliary units in the sample (control units)
M = np.shape(t_Ws)[1]
L = np.shape(r_Ws)[1]
if nocons:
M = M - 1 # Dimension of t_W (excluding constant)
L = L - 1 # Dimension of r_W (excluding constant)
DY = Ds * Ys # D*Y, N x 1 vector of observed outcomes for treated/study units
mDX = (1-Ds) * Ys # (1-D)*X, N x 1 vector of observed outcomes for non-treated/auxiliary units
# Add a constant to the regressor matrix (if needed)
if not nocons:
r_Ws = np.concatenate((np.ones((N,1)), r_Ws), axis=1)
r_W_names = ['constant'] + r_W_names
t_Ws = np.concatenate((np.ones((N,1)), t_Ws), axis=1)
t_W_names = ['constant'] + t_W_names
# Normalize weights to have mean one (if needed)
if s_wgt is None:
sw = 1
else:
s_wgt_var = s_wgt.name # Get sample weight variable name
sw = np.asarray(s_wgt/s_wgt.mean()).reshape((-1,1)) # Normalized sampling weights with mean one
# ----------------------------------------------------------------------------------- #
# - STEP 2 : ESTIMATE PROPENSITY SCORE PARAMETER BY LOGIT ML - #
# ----------------------------------------------------------------------------------- #
try:
if not silent:
print("")
print("--------------------------------------------------------------")
print("- Computing propensity score by MLE -")
print("--------------------------------------------------------------")
# CMLE of p-score coefficients
[delta_ml, vcov_delta_ml, hess_logl, score_i, p_W, _] = \
logit(D, r_W, s_wgt=s_wgt, nocons=nocons, \
c_id=c_id, silent=silent, full=False)
delta_ml = np.reshape(delta_ml,(-1,1)) # Put delta_ml into 2-dimensional form
p_W_index = r_Ws @ delta_ml # Fitted p-score index
NQ = np.sum(sw * p_W) # Sum of fitted p-scores
pi_eff = (sw * p_W) / NQ # Efficient estimate of F(W)
except:
print("FATAL ERROR: exitflag = 2, unable to compute propensity score by maximum likelihood.")
# Set all returnables to "None" and then exit function
gamma_ast = None
vcov_gamma_ast = None
pscore_tests = None
tilts = None
exitflag = 2
return [gamma_ast, vcov_gamma_ast, pscore_tests, tilts, exitflag]
# ----------------------------------------------------------------------------------- #
# - STEP 3 : SOLVE FOR AST TILTING PARAMETERS - #
# -----------------------------------------------------------------------------------
# Set optimization parameters
if silent:
# Use Newton-CG solver with vector of zeros as starting values,
# low tolerance levels, and smaller number of allowed iterations.
# Hide iteration output.
options_set = {'xtol': 1e-8, 'maxiter': 1000, 'disp': False}
else:
# Use Newton-CG solver with vector of zeros as starting values,
# high tolerance levels, and larger number of allowed iterations.
# Show iteration output.
options_set = {'xtol': 1e-12, 'maxiter': 10000, 'disp': True}
lambda_sv = np.zeros(1+M) # use vector of zeros as starting values
#------------------------------#
#- STUDY TILT -#
#------------------------------#
# NOTE: Only compute the study_tilt if directed to do so (this is the default). The study_tilt
# doesn't need to be computed if all the elements of t(W) are also included in h(W). It
# is the user's responsibility to check this condition.
if study_tilt:
# -------------------------------------------------- #
# - CASE 1: Non-trivial study sample tilt required - #
# -------------------------------------------------- #
# Compute lamba_s_hat (study or treated sample tilting parameters)
try:
if not silent:
print("")
print("--------------------------------------------------------------")
print("- Computing study/treated sample tilt -")
print("--------------------------------------------------------------")
# Derivative check at starting values
grad_norm = sp.optimize.check_grad(ast_crit, ast_foc, lambda_sv, Ds, p_W, \
p_W_index, t_Ws, NQ, rlgrz, \
sw, epsilon = 1e-12)
print('Study sample tilt derivative check (2-norm): ' + "%.8f" % grad_norm)
# Solve for tilting parameters
lambda_s_res = sp.optimize.minimize(ast_crit, lambda_sv, args=(Ds, p_W, p_W_index, \
t_Ws, NQ, rlgrz, sw), \
method='Newton-CG', jac=ast_foc, hess=ast_soc, \
callback = ast_study_callback, options=options_set)
else:
# Solve for tilting parameters
lambda_s_res = sp.optimize.minimize(ast_crit, lambda_sv, args=(Ds, p_W, p_W_index, \
t_Ws, NQ, rlgrz, sw), \
method='Newton-CG', jac=ast_foc, hess=ast_soc, \
options=options_set)
except:
print("FATAL ERROR: exitflag = 3, Unable to compute the study/treated vector of tilting parameters.")
# Set all returnables to "None" and then exit function
gamma_ast = None
vcov_gamma_ast = None
pscore_tests = None
tilts = None
exitflag = 3
return [gamma_ast, vcov_gamma_ast, pscore_tests, tilts, exitflag]
# Collect study tilt estimation results needed below
lambda_s_hat = np.reshape(lambda_s_res.x,(-1,1)) # study/treated sample tilting
# parameter estimates
p_W_s = (1+np.exp(-(p_W_index) - (t_Ws @ lambda_s_hat)))**-1 # study/treated sample tilted p-score
pi_s = Ds * pi_eff / p_W_s # study/treated sample tilt
else:
# ------------------------------------------ #
# - CASE 2: Study sample tilt NOT required - #
# ------------------------------------------ #
if not silent:
print("")
print("----------------------------------------------------------------------")
print("- Tilt of study sample not requested by user (study_tilt = False). -")
print("- Validity of this requires all elements of t(W) to be elements of -")
print("- h(W) as well. User is advised to verify this condition. -")
print("----------------------------------------------------------------------")
print("")
# Collect study tilt objects needed below
lambda_s_hat = np.reshape(lambda_sv ,(-1,1)) # study/treated sample tilting parameters set equal to zero
p_W_s = p_W # study/treated sample tilted p-score equals actual score
pi_s = Ds * pi_eff / p_W_s # set pi_s to "empirical measure" of study sub-sample
# (w/o sampling weights this puts mass 1/Ns on each study unit)
#------------------------------#
#- AUXILIARY TILT -#
#------------------------------#
# Compute lamba_a_hat (auxiliary or control sample tilting parameters)
try:
if not silent:
print("")
print("--------------------------------------------------------------")
print("- Computing auxiliary/control sample tilt -")
print("--------------------------------------------------------------")
# Derivative check at starting values
grad_norm = sp.optimize.check_grad(ast_crit, ast_foc, lambda_sv, 1-Ds, p_W, \
-p_W_index, t_Ws, NQ, rlgrz, \
sw, epsilon = 1e-12)
print('Auxiliary sample tilt derivative check (2-norm): ' + "%.8f" % grad_norm)
# Solve for tilting parameters
lambda_a_res = sp.optimize.minimize(ast_crit, lambda_sv, args=(1-Ds, p_W, -p_W_index, \
t_Ws, NQ, rlgrz, sw), \
method='Newton-CG', jac=ast_foc, hess=ast_soc, \
callback = ast_auxiliary_callback, options=options_set)
else:
# Solve for tilting parameters
lambda_a_res = sp.optimize.minimize(ast_crit, lambda_sv, args=(1-Ds, p_W, -p_W_index, \
t_Ws, NQ, rlgrz, sw), \
method='Newton-CG', jac=ast_foc, hess=ast_soc, \
options=options_set)
except:
print("FATAL ERROR: exitflag = 4, Unable to compute the auxiliary/control vector of tilting parameters.")
# Set returnables to "None" and then exit function
gamma_ast = None
vcov_gamma_ast = None
pscore_tests = None
tilts = None
exitflag = 4
return [gamma_ast, vcov_gamma_ast, pscore_tests, tilts, exitflag]
# Collect auxiliary tilt estimation results needed below
lambda_a_hat = -np.reshape(lambda_a_res.x,(-1,1)) # auxiliary/control sample tilting
# parameter estimates
p_W_a = (1+np.exp(-(p_W_index) - (t_Ws @ lambda_a_hat)))**-1 # auxiliary sample tilted p-score
pi_a = (1-Ds) * (pi_eff / (1-p_W_a)) # auxiliary sample tilt
# ----------------------------------------------------------------------------------- #
# - STEP 4 : SOLVE FOR AST ESTIMATE OF GAMMA (i.e., ATT) - #
# ----------------------------------------------------------------------------------- #
# AST estimate of gamma -- the ATT %
gamma_ast = np.sum(sw * p_W * ((Ds / p_W_s) * DY - (1-Ds) / (1-p_W_a) * mDX))/NQ;
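    # Equivalent formulation (added for clarity): since pi_s and pi_a put the
    # re-weighted mass on the treated and control units respectively, the line
    # above is the difference of tilted outcome means,
    #   gamma_ast = sum(pi_s * Y) - sum(pi_a * Y),
    # i.e. the re-weighted treated mean minus the re-weighted control mean.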
# ----------------------------------------------------------------------------------- #
# - STEP 5 : FORM LARGE SAMPLE VARIANCE-COVARIANCE ESTIMATES - #
# ----------------------------------------------------------------------------------- #
# Form moment vector corresponding to full three step procedure
m1 = (sw * (Ds - p_W) * r_Ws).T # 1+L x N matrix of m_1 moments (logit scores)
m2 = (sw * ((1 - Ds) / (1 - p_W_a) - 1) * p_W * t_Ws).T # 1+M x N matrix of m_2 moments
m3 = (sw * (Ds / p_W_s - 1) * p_W * t_Ws).T # 1+M x N matrix of m_3 moments
m4 = (sw * p_W * ((Ds / p_W_s) * DY - ((1-Ds) / (1-p_W_a)) * (mDX+gamma_ast))).T # 1 x N matrix of m_4 moments
m = np.concatenate((m1, m2, m3, m4), axis=0) # 1 + L + 2(1 + M) + 1 x N matrix of all moments
# Calculate covariance matrix of moment vector. Take into account any
# within-group dependence/clustering as needed
if c_id is None:
# Case 1: No cluster dependence to account for when constructing covariance matrix
C = N # Number of clusters equals number of observations
fsc = N/(N - (1+L+2*(1+M)+1)) # Finite-sample correction factor
V_m = fsc*(m @ m.T)/N
else:
# Case 2: Need to correct for cluster dependence when constructing covariance matrix
# Get number and unique list of clusters
c_list = np.unique(c_id)
C = len(c_list)
# Calculate cluster-robust variance-covariance matrix of m
# Sum moment vector within clusters
sum_m = np.empty((C,1+L+2*(1+M)+1)) # initiate vector of cluster-summed moments
for c in range(0,C):
# sum of moments for units in c-th cluster
b_cluster = np.nonzero((c_id == c_list[c]))[0] # Observations in c-th cluster
sum_m[c,:] = np.sum(m[np.ix_(range(0,1+L+2*(1+M)+1), b_cluster)], axis = 1) # Sum over "columns" within c-th cluster
# Compute variance-covariance matrix of moment vector
fsc = (N/(N - (1+L+2*(1+M)+1)))*(C/(C-1)) # Finite-sample correction factor
V_m = fsc*(sum_m.T @ sum_m)/C # Variance-covariance of the summed moments
# Form Jacobian matrix for entire parameter: theta = (rho, delta, lambda, gamma)
e_V = np.exp(np.dot(r_Ws, delta_ml))
e_Va = np.exp(np.dot(r_Ws, delta_ml) + np.dot(t_Ws, lambda_a_hat))
e_Vs = np.exp(np.dot(r_Ws, delta_ml) + np.dot(t_Ws, lambda_s_hat))
M1_delta = np.dot((sw * (- e_V / (1 + e_V)**2) * r_Ws).T, r_Ws)/N # 1 + L x 1 + L
M2_delta = np.dot((sw * ((1 - Ds) / (1 - p_W_a) - 1) * (e_V / (1 + e_V)**2) * t_Ws).T, r_Ws)/N # 1 + M x 1 + L
M3_delta = np.dot((sw * (Ds / p_W_s - 1) * (e_V / (1 + e_V)**2) * t_Ws).T, r_Ws)/N # 1 + M x 1 + L
M4_delta = np.dot((sw * (e_V / (1 + e_V)**2) * \
((Ds / p_W_s) * DY - ((1 - Ds) / (1 - p_W_a)) * (mDX + gamma_ast))).T, r_Ws)/N # 1 x 1 + L
M2_lambda_a = np.dot(( sw * ((1 - Ds) / (1 - p_W_a)**2) * p_W * (e_Va / (1 + e_Va)**2) * t_Ws).T, t_Ws)/N # 1 + M x 1 + M
M4_lambda_a = np.dot((-sw * ((1 - Ds) / (1 - p_W_a)**2) * p_W * (mDX+gamma_ast) * (e_Va / (1 + e_Va)**2)).T, t_Ws)/N # 1 x 1 + M
M3_lambda_s = np.dot((-sw * (Ds / p_W_s**2) * p_W * (e_Vs / (1 + e_Vs)**2) * t_Ws).T, t_Ws)/N # 1 + M x 1 + M
M4_lambda_s = np.dot((-sw * (Ds / p_W_s**2) * p_W * DY * (e_Vs / (1 + e_Vs)**2)).T, t_Ws)/N # 1 x 1 + M
M4_gamma = -(NQ/N).reshape(1,1) # 1 x 1
M1 = np.hstack((M1_delta, np.zeros((1+L,1+M)), np.zeros((1+L,1+M)), np.zeros((1+L,1))))
M2 = np.hstack((M2_delta, M2_lambda_a, np.zeros((1+M,1+M)), np.zeros((1+M,1))))
M3 = np.hstack((M3_delta, np.zeros((1+M,1+M)), M3_lambda_s, np.zeros((1+M,1))))
M4 = np.hstack((M4_delta, M4_lambda_a, M4_lambda_s, M4_gamma))
# Concatenate Jacobian and compute inverse
M_hat = (N/C)*np.vstack((M1, M2, M3, M4))
iM_hat = np.linalg.inv(M_hat)
# Compute sandwich variance estimates
vcov_theta_ast = (iM_hat @ V_m @ iM_hat.T)/C
vcov_gamma_ast = vcov_theta_ast[-1,-1]
exitflag = 1 # AST estimate of the ATT successfully computed!
# ----------------------------------------------------------------------------------- #
# - STEP 6 : COMPUTE TEST STATISTICS BASED ON TILTING PARAMETER - #
# ----------------------------------------------------------------------------------- #
# Compute propensity score specification test based on study tilt (if applicable)
if study_tilt:
iV_lambda_s = np.linalg.inv(vcov_theta_ast[1+L:1+L+1+M,1+L:1+L+1+M])
ps_test_st = np.dot(np.dot(lambda_s_hat.T, iV_lambda_s), lambda_s_hat)
dof_st = len(lambda_s_hat)
pval_st = 1 - sp.stats.chi2.cdf(ps_test_st[0,0], dof_st)
study_test = [ps_test_st[0,0], dof_st, pval_st]
else:
study_test = [None, None, None]
# Compute propensity score specification test based on auxiliary tilt (always done)
iV_lambda_a = np.linalg.inv(vcov_theta_ast[1+L+1+M:1+L+1+M+1+M,1+L+1+M:1+L+1+M+1+M])
ps_test_at = np.dot(np.dot(lambda_a_hat.T, iV_lambda_a), lambda_a_hat)
dof_at = len(lambda_a_hat)
pval_at = 1 - sp.stats.chi2.cdf(ps_test_at[0,0], dof_at)
auxiliary_test = [ps_test_at[0,0], dof_at, pval_at]
# ----------------------------------------------------------------------------------- #
# - STEP 7 : DISPLAY RESULTS - #
# ----------------------------------------------------------------------------------- #
if not silent:
print("")
print("-------------------------------------------------------------------------------------------")
print("- Auxiliary-to-Study (AST) estimates of the ATT -")
print("-------------------------------------------------------------------------------------------")
print("ATT: " + "%10.6f" % gamma_ast)
print(" (" + "%10.6f" % np.sqrt(vcov_gamma_ast) + ")")
print("")
print("-------------------------------------------------------------------------------------------")
if c_id is None:
print("NOTE: Outcome variable = " + dep_var)
print(" Heteroscedastic-robust standard errors reported")
print(" N1 = " "%0.0f" % Ns + ", N0 = " + "%0.0f" % Na)
else:
print("NOTE: Outcome variable = " + dep_var)
print(" Cluster-robust standard errors reported")
print(" Cluster-variable = " + c_id.name)
print(" Number of clusters = " + "%0.0f" % C)
print(" N1 = " "%0.0f" % Ns + ", N0 = " + "%0.0f" % Na)
if s_wgt is not None:
print("NOTE: (Sampling) Weighted AST estimates computed.")
print(" Weight-variable = " + s_wgt_var)
print("")
print("-------------------------------------------------------------------------------------------")
print("- Maximum likelihood estimates of the p-score -")
print("-------------------------------------------------------------------------------------------")
print("")
print("Independent variable Coef. ( Std. Err.) ")
print("-------------------------------------------------------------------------------------------")
c = 0
for names in r_W_names:
print(names.ljust(25) + "%10.6f" % delta_ml[c] + \
" (" + "%10.6f" % np.sqrt(vcov_theta_ast[c,c]) + ")")
c += 1
print("")
print("-------------------------------------------------------------------------------------------")
print("- Tilting parameter estimates -")
print("-------------------------------------------------------------------------------------------")
if study_tilt:
print("")
print("TREATED (study) sample tilt")
print("-------------------------------------------------------------------------------------------")
print("")
print("Independent variable Coef. ( Std. Err.) ")
print("-------------------------------------------------------------------------------------------")
c = 0
for names in t_W_names:
print(names.ljust(25) + "%10.6f" % lambda_s_hat[c] + \
" (" + "%10.6f" % np.sqrt(vcov_theta_ast[1+L+c,1+L+c]) + ")")
c += 1
print("")
print("Specification test for p-score (H_0 : lambda_s = 0)")
print("-------------------------------------------------------------------------------------------")
print("chi-square("+str(dof_st)+") = " + "%10.6f" % ps_test_st + " p-value: " + "% .6f" % pval_st)
print("")
print("Summary statistics study/treated re-weighting")
print("-------------------------------------------------------------------------------------------")
j = np.where(D)[0] # find indices of treated units
N_s_eff = 1/np.sum(pi_s[j]**2) # Kish's formula for effective sample size
print("Kish's effective study/treated sample size = " "%0.0f" % N_s_eff)
print("")
print("Percentiles of N_s * pi_s distribution")
quantiles = [1, 5, 10, 25, 50, 75, 90, 95, 99]
qnt_pi_s = np.percentile(Ns*pi_s[j],quantiles)
c = 0
for q in quantiles:
print("%2.0f" % quantiles[c] + " percentile = " "%2.4f" % qnt_pi_s[c])
c += 1
else:
print("")
print("--------------------------------------------------------")
print("- NOTE: Study tilt not computed (study_tilt = False). -")
print("- Components of t(W) assumed to be also in h(W). -")
print("--------------------------------------------------------")
print("")
print("")
print("CONTROL (auxiliary) sample tilt")
print("-------------------------------------------------------------------------------------------")
print("")
print("Independent variable Coef. ( Std. Err.) ")
print("-------------------------------------------------------------------------------------------")
c = 0
for names in t_W_names:
print(names.ljust(25) + "%10.6f" % lambda_a_hat[c] + \
" (" + "%10.6f" % np.sqrt(vcov_theta_ast[1+L+1+M+c,1+L+1+M+c]) + ")")
c += 1
print("")
print("Specification test for p-score (H_0 : lambda_a = 0)")
print("-------------------------------------------------------------------------------------------")
print("chi-square("+str(dof_at)+") = " + "%10.6f" % ps_test_at + " p-value: " + "% .6f" % pval_at)
print("")
print("Summary statistics auxiliary/control re-weighting")
print("-------------------------------------------------------------------------------------------")
j = np.where(1-D)[0] # find indices of control units
N_a_eff = 1/np.sum(pi_a[j]**2) # Kish's formula for effective sample size
print("Kish's effective auxiliary/control sample size = " "%0.0f" % N_a_eff)
print("")
print("Percentiles of N_a * pi_a distribution")
quantiles = [1, 5, 10, 25, 50, 75, 90, 95, 99]
qnt_pi_a = np.percentile(Na*pi_a[j],quantiles)
c = 0
for q in quantiles:
print("%2.0f" % quantiles[c] + " percentile = " "%2.4f" % qnt_pi_a[c])
c += 1
# ------------------------------------------- #
# Construct "exact balancing" table - #
# ------------------------------------------- #
Na_wgt = np.sum(sw * (1-Ds) , axis = 0)
Ns_wgt = np.sum(sw * Ds , axis = 0)
# Compute means of t_W across various distribution function estimates
# Mean of t(W) across controls
mu_t_D0 = np.sum(sw * (1-Ds) * t_Ws, axis = 0)/Na_wgt
mu_t_D0_std = np.sqrt(np.sum(sw * (1-Ds) * (t_Ws - mu_t_D0)**2, axis = 0)/Na_wgt)
# Mean of t(W) across treated
mu_t_D1 = np.sum(sw * Ds * t_Ws, axis = 0)/Ns_wgt
mu_t_D1_std = np.sqrt(np.sum(sw * Ds * (t_Ws - mu_t_D1)**2, axis = 0)/Ns_wgt)
# Normalized mean differences across treatment and controls
# (cf., Imbens, 2015, Journal of Human Resources)
NormDif_t = (mu_t_D1 - mu_t_D0)/np.sqrt((mu_t_D1_std**2 + mu_t_D0_std**2)/2)
# Semiparametrically efficient estimate of mean of t(W) across treated
mu_t_eff = np.sum(pi_eff * t_Ws, axis = 0)
mu_t_eff_std = np.sqrt(np.sum(pi_eff * (t_Ws - mu_t_eff)**2, axis = 0))
# Mean of t(W) across controls after re-weighting
mu_t_a = np.sum(pi_a * t_Ws, axis = 0)
mu_t_a_std = np.sqrt(np.sum(pi_a * (t_Ws - mu_t_a)**2, axis = 0))
# Mean of t(W) across treated after re-weighting
mu_t_s = np.sum(pi_s * t_Ws, axis = 0)
mu_t_s_std = np.sqrt(np.sum(pi_s * (t_Ws - mu_t_s)**2, axis = 0))
# Pre-balance table
print("")
print("Means & standard deviations of t_W (pre-balance) ")
print("-------------------------------------------------------------------------------------------")
print(" Treated (D = 1) Control (D = 0) Norm. Diff. ")
print("-------------------------------------------------------------------------------------------")
c = 0
for names in t_W_names:
print(names.ljust(25) + "%8.4f" % mu_t_D1[c] + " (" + "%8.4f" % mu_t_D1_std[c] + ") " \
+ "%8.4f" % mu_t_D0[c] + " (" + "%8.4f" % mu_t_D0_std[c] + ") " \
+ "%8.4f" % NormDif_t[c])
c += 1
# Post-balance table
print("")
print("Means and standard deviations of t_W (post-balance) ")
print("-------------------------------------------------------------------------------------------")
print(" Treated (D = 1) Control (D = 0) Efficient (D = 1)")
print("-------------------------------------------------------------------------------------------")
c = 0
for names in t_W_names:
print(names.ljust(25) + "%8.4f" % mu_t_s[c] + " (" + "%8.4f" % mu_t_s_std[c] + ") " \
+ "%8.4f" % mu_t_a[c] + " (" + "%8.4f" % mu_t_a_std[c] + ") " \
+ "%8.4f" % mu_t_eff[c] + " (" + "%8.4f" % mu_t_eff_std[c] + ") ")
c += 1
# Collect/format remaining returnables and exit function
pscore_tests = [study_test, auxiliary_test] # Collect p-score test results
tilts = np.concatenate((pi_eff, pi_s, pi_a), axis=1) # Collect three sample tilts
return [gamma_ast, vcov_gamma_ast, pscore_tests, tilts, exitflag] | mit |
MarcusTherkildsen/HackThisSite | prog_missions/4/main.py | 1 | 3379 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 07 22:10:39 2015
@author: Marcus Therkildsen
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
def deg2rad(deg_in):
return deg_in*np.pi/180
def xy(r,phi):
return r*np.cos(phi), r*np.sin(phi)
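# Note (added): xy() maps polar coordinates (r, phi in radians) to Cartesian
# ones, e.g. xy(1.0, np.pi/2) is approximately (0.0, 1.0); it is used below to
# sample points along each <Arc> element before shifting them by the arc centre.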
if __name__ == '__main__':
# Load the xml file
xml = np.genfromtxt('plotMe.xml', delimiter=',', dtype='str', autostrip=True)
# Number of line elements
num_lines = list(xml).count('<Line>')
# Number of arc elements
num_arcs = list(xml).count('<Arc>')
# Prepare arrays
lines = np.zeros([num_lines,4]) # (x_start,x_end,y_start,y_end)
c_lines = np.empty(num_lines,dtype='str')
arcs = np.zeros([num_arcs,5]) # (x_center,y_center, arc_start (in degrees), arc_extend (in degrees), radius)
c_arcs = np.empty(num_arcs,dtype='str')
# Go through xml document
tj_lines = -1
tj_arcs = -1
for i in xrange(len(xml)):
if '<Line>' in xml[i]:
tj_lines+=1
# In case no color is defined, predefine it to be white
color_ = 'w'
for k in xrange(5):
if 'YEnd' in xml[i+k+1]:
y_end = float(xml[i+k+1][6:-7])
elif 'YStart' in xml[i+k+1]:
y_start = float(xml[i+k+1][8:-9])
elif 'XEnd' in xml[i+k+1]:
x_end = float(xml[i+k+1][6:-7])
elif 'XStart' in xml[i+k+1]:
x_start = float(xml[i+k+1][8:-9])
elif 'Color' in xml[i+k+1]:
color_ = xml[i+k+1][7:-8]
lines[tj_lines,:] = [x_start, x_end, y_start, y_end]
c_lines[tj_lines] = color_
if '<Arc>' in xml[i]:
tj_arcs+=1
# In case no color is defined, predefine it to be white
color_ = 'w'
for k in xrange(6):
if 'XCenter' in xml[i+k+1]:
x_center = float(xml[i+k+1][9:-10])
elif 'YCenter' in xml[i+k+1]:
y_center = float(xml[i+k+1][9:-10])
elif 'ArcStart' in xml[i+k+1]:
arc_start = float(xml[i+k+1][10:-11])
elif 'ArcExtend' in xml[i+k+1]:
arc_extend = float(xml[i+k+1][11:-12])
elif 'Radius' in xml[i+k+1]:
radius = float(xml[i+k+1][8:-9])
elif 'Color' in xml[i+k+1]:
color_ = xml[i+k+1][7:-8]
arcs[tj_arcs,:] = [x_center,y_center,arc_start,arc_extend,radius]
c_arcs[tj_arcs] = color_
"""
Plot
"""
fig, ax =plt.subplots()
# Color background black
ax.set_axis_bgcolor('k')
[ax.plot(lines[i,:2],lines[i,2:],color = c_lines[i]) for i in xrange(num_lines)]
# Plot the arcs. Remember that the arc should begin at arc_start and end at arc_start + arc_extend
for i in xrange(num_arcs):
stuff = np.array(xy(arcs[i,4],np.arange(deg2rad(arcs[i,2]),deg2rad(arcs[i,2])+deg2rad(arcs[i,3]),0.1))).T
x_ = stuff[:,0]+arcs[i,0]
y_ = stuff[:,1]+arcs[i,1]
ax.plot(x_,y_,color = c_arcs[i])
# Remove labels
plt.setp( ax.get_xticklabels(), visible=False)
plt.setp( ax.get_yticklabels(), visible=False)
plt.savefig('done.png',dpi=400,bbox_inches='tight')
plt.show() | mit |
samzhang111/scikit-learn | sklearn/naive_bayes.py | 11 | 28770 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
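        # Note (added): the subtraction above is the usual log-space normalization
        #   log P(c | x) = [log P(c) + log P(x | c)]
        #                  - log sum_c' exp(log P(c') + log P(x | c')),
        # with the log-sum-exp taken row-wise over classes for numerical stability.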
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like, shape (n_samples, n_features)
            New data points whose mean and variance are folded into the update.
        sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as they fit in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
        _refit : bool
            If true, act as though this were the first time we called
            _partial_fit (i.e., throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
        # boost the variance by epsilon, a small fraction of the largest
        # dimension's variance.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
                             (unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
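# Illustrative check (hypothetical helper, not part of scikit-learn): the
# chunk-combination formula in GaussianNB._update_mean_variance reproduces the
# mean and population variance of the pooled data, which is what makes
# repeated partial_fit calls match a single fit for the Gaussian statistics.
def _demo_online_mean_variance():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    old, new = X[:60], X[60:]
    mu, var = GaussianNB._update_mean_variance(
        old.shape[0], old.mean(axis=0), old.var(axis=0), new)
    # combined statistics agree with those of the full data set
    assert np.allclose(mu, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))
    return mu, var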
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
        (as long as they fit in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
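# Small numeric sketch (hypothetical helper, for illustration only) of the two
# priors set in BaseDiscreteNB._update_class_log_prior: the empirical prior is
# built from the observed class counts, while fit_prior=False gives a uniform
# prior over the classes.
def _demo_class_log_prior():
    class_count = np.array([30., 10.])
    empirical = np.log(class_count) - np.log(class_count.sum())
    uniform = np.zeros(2) - np.log(2)
    # np.exp(empirical) -> [0.75, 0.25]; np.exp(uniform) -> [0.5, 0.5]
    return np.exp(empirical), np.exp(uniform)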
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
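# Hypothetical sanity check (not part of the public API): MultinomialNB's
# feature_log_prob_ is simply the log of Laplace-smoothed count ratios, as
# computed in _update_feature_log_prob above. The toy counts are made up for
# illustration.
def _demo_multinomial_smoothing():
    X = np.array([[2, 1, 0],
                  [3, 0, 1],
                  [0, 4, 1]])
    y = np.array([0, 0, 1])
    clf = MultinomialNB(alpha=1.0).fit(X, y)
    counts = np.array([X[y == 0].sum(axis=0), X[y == 1].sum(axis=0)],
                      dtype=np.float64)
    smoothed = counts + clf.alpha
    expected = np.log(smoothed) - np.log(smoothed.sum(axis=1, keepdims=True))
    assert np.allclose(clf.feature_log_prob_, expected)
    return expected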
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
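# The comment in BernoulliNB._joint_log_likelihood relies on the identity
# sum_j (1 - X[n, j]) * neg_prob[c, j]
#     == neg_prob[c, :].sum() - sum_j X[n, j] * neg_prob[c, j],
# which folds the contribution of absent features into a single dot product.
# The function below is a hypothetical, self-contained numeric check of that
# identity; the random values are for illustration only.
def _demo_bernoulli_jll_identity():
    rng = np.random.RandomState(0)
    X = (rng.rand(5, 4) > 0.5).astype(np.float64)
    log_p = np.log(rng.uniform(0.1, 0.9, size=(3, 4)))  # log P(x_i = 1 | y)
    neg_prob = np.log(1 - np.exp(log_p))                 # log P(x_i = 0 | y)
    direct = X.dot(log_p.T) + (1 - X).dot(neg_prob.T)
    factored = X.dot((log_p - neg_prob).T) + neg_prob.sum(axis=1)
    assert np.allclose(direct, factored)
    return direct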
| bsd-3-clause |
waterponey/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 127 | 1270 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure('scikit-learn parallel %s benchmark results' % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel('n_samples')
plt.ylabel('Time (s)')
plt.title('Parallel %s' % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
| bsd-3-clause |
bsipocz/pyspeckit | pyspeckit/cubes/mapplot.py | 2 | 16597 | """
MapPlot
-------
Make plots of the cube and interactively connect them to spectrum plotting.
This is really an interactive component of the package; nothing in here is
meant for publication-quality plots, but more for user interactive analysis.
That said, the plotter makes use of `APLpy <https://github.com/aplpy/aplpy>`_,
so it is possible to make publication-quality plots.
:author: Adam Ginsburg
:date: 03/17/2011
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import matplotlib
import matplotlib.pyplot
import matplotlib.figure
import numpy as np
import copy
import itertools
from pyspeckit.specwarnings import warn
try:
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
pywcsOK = True
except ImportError:
try:
import pyfits
import pywcs
pywcsOK = True
except ImportError:
pywcsOK = False
import cubes
try:
import aplpy
icanhasaplpy = True
except: # aplpy fails with generic exceptions instead of ImportError
icanhasaplpy = False
try:
import coords
icanhascoords = True
except ImportError:
icanhascoords = False
class MapPlotter(object):
"""
Class to plot a spectrum
See `mapplot` for use documentation; this docstring is only for
initialization.
"""
def __init__(self, Cube=None, figure=None, doplot=False, **kwargs):
"""
Create a map figure for future plotting
"""
# figure out where to put the plot
if isinstance(figure,matplotlib.figure.Figure):
self.figure = figure
elif type(figure) is int:
self.figure = matplotlib.pyplot.figure(figure)
else:
self.figure = None
self.axis = None
self.FITSFigure = None
self._click_marks = []
self._circles = []
self._clickX = None
self._clickY = None
self.overplot_colorcycle = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y'])
self.overplot_linestyle = '-'
self.Cube = Cube
if self.Cube is not None:
self.header = cubes.flatten_header(self.Cube.header, delete=True)
if pywcsOK:
self.wcs = pywcs.WCS(self.header)
if doplot: self.mapplot(**kwargs)
def __call__(self, **kwargs):
""" see mapplot """
return self.mapplot(**kwargs)
def mapplot(self, convention='calabretta', colorbar=True, useaplpy=True,
vmin=None, vmax=None, cmap=None, plotkwargs={}, **kwargs):
"""
Plot up a map based on an input data cube.
The map to be plotted is selected using `makeplane`.
The `estimator` keyword argument is passed to that function.
The plotted map, once shown, is interactive. You can click on it with any
of the three mouse buttons.
Button 1 or keyboard '1':
Plot the selected pixel's spectrum in another window. Mark the
clicked pixel with an 'x'
Button 2 or keyboard 'o':
Overplot a second (or third, fourth, fifth...) spectrum in the
external plot window
Button 3:
Disconnect the interactive viewer
You can also click-and-drag with button 1 to average over a circular
region. This same effect can be achieved by using the 'c' key to
set the /c/enter of a circle and the 'r' key to set its /r/adius (i.e.,
hover over the center and press 'c', then hover some distance away and
press 'r').
Parameters
----------
convention : 'calabretta' or 'griesen'
The default projection to assume for Galactic data when plotting
with aplpy.
colorbar : bool
Whether to show a colorbar
plotkwargs : dict, optional
A dictionary of keyword arguments to pass to aplpy.show_colorscale
or matplotlib.pyplot.imshow
useaplpy : bool
Use aplpy if a FITS header is available
vmin, vmax: float or None
Override values for the vmin/vmax values. Will be automatically
determined if left as None
.. todo:
Allow mapplot in subfigure
"""
if self.figure is None:
self.figure = matplotlib.pyplot.figure()
else:
self._disconnect()
self.figure.clf()
# this is where the map is created; everything below this is just plotting
self.makeplane(**kwargs)
        # have to pop out estimator so that kwargs can be passed to imshow
if 'estimator' in kwargs:
kwargs.pop('estimator')
# Below here is all plotting stuff
if vmin is None: vmin = self.plane[self.plane==self.plane].min()
if vmax is None: vmax = self.plane[self.plane==self.plane].max()
if icanhasaplpy and useaplpy:
self.fitsfile = pyfits.PrimaryHDU(data=self.plane,header=self.header)
self.FITSFigure = aplpy.FITSFigure(self.fitsfile,figure=self.figure,convention=convention)
self.FITSFigure.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
self.axis = self.FITSFigure._ax1
if colorbar:
try:
self.FITSFigure.add_colorbar()
except Exception as ex:
print "ERROR: Could not create colorbar! Error was %s" % str(ex)
self._origin = 0 # FITS convention
# TODO: set _origin to 1 if using PIXEL units, not real wcs
else:
self.axis = self.figure.add_subplot(111)
if hasattr(self,'colorbar') and self.colorbar is not None:
if self.colorbar.ax in self.axis.figure.axes:
self.axis.figure.delaxes(self.colorbar.ax)
self.axis.imshow(self.plane, vmin=vmin, vmax=vmax, cmap=cmap, **plotkwargs)
if colorbar:
try:
self.colorbar = matplotlib.pyplot.colorbar(self.axis.images[0])
except Exception as ex:
print "ERROR: Could not create colorbar! Error was %s" % str(ex)
self._origin = 0 # normal convention
self.canvas = self.axis.figure.canvas
self._connect()
def _connect(self):
""" Connect click, click up (release click), and key press to events """
self.clickid = self.canvas.callbacks.connect('button_press_event',self.click)
self.clickupid = self.canvas.callbacks.connect('button_release_event',self.plot_spectrum)
self.keyid = self.canvas.callbacks.connect('key_press_event',self.plot_spectrum)
def _disconnect(self):
""" Disconnect click, click up (release click), and key press from events """
if hasattr(self,'canvas'):
self.canvas.mpl_disconnect(self.clickid)
self.canvas.mpl_disconnect(self.clickupid)
self.canvas.mpl_disconnect(self.keyid)
def makeplane(self, estimator=np.mean):
"""
Create a "plane" view of the cube, either by slicing or projecting it
or by showing a slice from the best-fit model parameter cube.
Parameters
----------
estimator : [ function | 'max' | 'int' | FITS filename | integer | slice ]
A non-pythonic, non-duck-typed variable. If it's a function, apply that function
along the cube's spectral axis to obtain an estimate (e.g., mean, min, max, etc.).
'max' will do the same thing as passing np.max
'int' will attempt to integrate the image (which is why I didn't duck-type)
(integrate means sum and multiply by dx)
a .fits filename will be read using pyfits (so you can make your own cover figure)
an integer will get the n'th slice in the parcube if it exists
If it's a slice, slice the input data cube along the Z-axis with this slice
"""
# THIS IS A HACK!!! isinstance(a function, function) must be a thing...
FUNCTION = type(np.max)
# estimator is NOT duck-typed
if type(estimator) is FUNCTION:
self.plane = estimator(self.Cube.cube,axis=0)
elif type(estimator) is str:
if estimator == 'max':
self.plane = self.Cube.cube.max(axis=0)
elif estimator == 'int':
dx = np.abs(self.Cube.xarr[1:] - self.Cube.xarr[:-1])
dx = np.concatenate([dx,[dx[-1]]])
self.plane = (self.Cube.cube * dx[:,np.newaxis,np.newaxis]).sum(axis=0)
elif estimator[-5:] == ".fits":
self.plane = pyfits.getdata(estimator)
elif type(estimator) is slice:
self.plane = self.Cube.cube[estimator,:,:]
elif type(estimator) is int:
if hasattr(self.Cube,'parcube'):
self.plane = self.Cube.parcube[estimator,:,:]
if self.plane is None:
raise ValueError("Invalid estimator %s" % (str(estimator)))
if np.sum(np.isfinite(self.plane)) == 0:
raise ValueError("Map is all NaNs or infs. Check your estimator or your input cube.")
def click(self,event):
"""
Record location of downclick
"""
if event.inaxes:
self._clickX = np.round(event.xdata) - self._origin
self._clickY = np.round(event.ydata) - self._origin
def plot_spectrum(self, event, plot_fit=True):
"""
Connects map cube to Spectrum...
"""
self.event = event
if event.inaxes:
clickX = np.round(event.xdata) - self._origin
clickY = np.round(event.ydata) - self._origin
# grab toolbar info so that we don't do anything if a tool is selected
tb = self.canvas.toolbar
if tb.mode != '':
return
elif event.key is not None:
if event.key == 'c':
self._center = (clickX-1,clickY-1)
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
elif event.key == 'r':
x,y = self._center
self._add_circle(x,y,clickX,clickY)
self.circle(x,y,clickX-1,clickY-1)
elif event.key == 'o':
clickX,clickY = round(clickX),round(clickY)
print "OverPlotting spectrum from point %i,%i" % (clickX-1,clickY-1)
color=self.overplot_colorcycle.next()
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.key in ('1','2'):
event.button = int(event.key)
event.key = None
self.plot_spectrum(event)
elif (hasattr(event,'button') and event.button in (1,2)
and not (self._clickX == clickX and self._clickY == clickY)):
if event.button == 1:
self._remove_circle()
clear=True
color = 'k'
linestyle = 'steps-mid'
else:
color = self.overplot_colorcycle.next()
linestyle = self.overplot_linestyle
clear=False
rad = ( (self._clickX-clickX)**2 + (self._clickY-clickY)**2 )**0.5
print "Plotting circle from point %i,%i to %i,%i (r=%f)" % (self._clickX-1,self._clickY-1,clickX-1,clickY-1,rad)
self._add_circle(self._clickX,self._clickY,clickX,clickY)
self.circle(self._clickX-1,self._clickY-1,clickX-1,clickY-1,clear=clear,linestyle=linestyle,color=color)
elif hasattr(event,'button') and event.button is not None:
if event.button==1:
clickX,clickY = round(clickX),round(clickY)
print "Plotting spectrum from point %i,%i" % (clickX-1,clickY-1)
self._remove_circle()
self._add_click_mark(clickX,clickY,clear=True)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=True)
if plot_fit: self.Cube.plot_fit(clickX-1, clickY-1, silent=True)
elif event.button==2:
clickX,clickY = round(clickX),round(clickY)
print "OverPlotting spectrum from point %i,%i" % (clickX-1,clickY-1)
color=self.overplot_colorcycle.next()
self._add_click_mark(clickX,clickY,clear=False, color=color)
self.Cube.plot_spectrum(clickX-1,clickY-1,clear=False, color=color, linestyle=self.overplot_linestyle)
elif event.button==3:
print "Disconnecting GAIA-like tool"
self._disconnect()
else:
print "Call failed for some reason: "
print "event: ",event
else:
pass
# never really needed... warn("Click outside of axes")
def _add_click_mark(self,x,y,clear=False,color='k'):
"""
Add an X at some position
"""
if clear:
self._clear_click_marks()
if self.FITSFigure is not None:
label = 'xmark%i' % (len(self._click_marks)+1)
x,y = self.FITSFigure.pixel2world(x,y)
self.FITSFigure.show_markers(x,y,marker='x',c=color,layer=label)
self._click_marks.append( label )
else:
self._click_marks.append( self.axis.plot(x,y,'kx') )
self.refresh()
def _clear_click_marks(self):
"""
Remove all marks added by previous clicks
"""
if self.FITSFigure is not None:
for mark in self._click_marks:
if mark in self.FITSFigure._layers:
self.FITSFigure.remove_layer(mark)
else:
for mark in self._click_marks:
self._click_marks.remove(mark)
if mark in self.axis.lines:
self.axis.lines.remove(mark)
self.refresh()
def _add_circle(self,x,y,x2,y2,**kwargs):
"""
"""
if self.FITSFigure is not None:
x,y = self.FITSFigure.pixel2world(x,y)
x2,y2 = self.FITSFigure.pixel2world(x2,y2)
r = (np.linalg.norm(np.array([x,y])-np.array([x2,y2])))
#self.FITSFigure.show_markers(x,y,s=r,marker='o',facecolor='none',edgecolor='black',layer='circle')
layername = "circle%02i" % len(self._circles)
self.FITSFigure.show_circles(x,y,r,edgecolor='black',facecolor='none',layer=layername,**kwargs)
self._circles.append(layername)
else:
r = np.linalg.norm(np.array([x,y])-np.array([x2,y2]))
circle = matplotlib.patches.Circle([x,y],radius=r,**kwargs)
self._circles.append( circle )
self.axis.patches.append(circle)
self.refresh()
def _remove_circle(self):
"""
"""
if self.FITSFigure is not None:
for layername in self._circles:
if layername in self.FITSFigure._layers:
self.FITSFigure.remove_layer(layername)
else:
for circle in self._circles:
if circle in self.axis.patches:
self.axis.patches.remove(circle)
self._circles.remove(circle)
self.refresh()
def refresh(self):
if self.axis is not None:
self.axis.figure.canvas.draw()
def circle(self,x1,y1,x2,y2,**kwargs):
"""
Plot the spectrum of a circular aperture
"""
r = (np.linalg.norm(np.array([x1,y1])-np.array([x2,y2])))
self.Cube.plot_apspec([x1,y1,r],**kwargs)
#self.Cube.data = cubes.extract_aperture( self.Cube.cube, [x1,y1,r] , coordsys=None )
#self.Cube.plotter()
def copy(self, parent=None):
"""
Create a copy of the map plotter with blank (uninitialized) axis & figure
[ parent ]
A spectroscopic axis instance that is the parent of the specfit
instance. This needs to be specified at some point, but defaults
to None to prevent overwriting a previous plot.
"""
newmapplot = copy.copy(self)
newmapplot.Cube = parent
newmapplot.axis = None
newmapplot.figure = None
return newmapplot
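# Hypothetical usage sketch (illustrative only): pyspeckit's Cube is assumed
# to expose an instance of this MapPlotter as its ``mapplot`` attribute, and
# the file name 'cube.fits' is a placeholder. A typical interactive session
# then looks roughly like the function below.
def _example_mapplot_usage():
    import pyspeckit
    cube = pyspeckit.Cube('cube.fits')
    # show the integrated map; clicking a pixel plots its spectrum,
    # 'c' followed by 'r' selects a circular aperture, button 3 disconnects
    cube.mapplot(estimator='int')
    return cube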
| mit |
geodynamics/burnman | setup.py | 2 | 1353 | from __future__ import absolute_import
import re
versionstuff = dict(
re.findall("(.+) = '(.+)'\n", open('burnman/version.py').read()))
metadata = dict(name='burnman',
version=versionstuff['version'],
description='a thermoelastic and thermodynamic toolkit for Earth and planetary sciences',
url='http://burnman.org',
author='The BurnMan Team',
author_email='[email protected]',
license='GPL',
long_description='BurnMan is a Python library for generating thermodynamic and thermoelastic models of planetary interiors.',
packages=['burnman', 'burnman.minerals', 'burnman.eos'],
package_data={'burnman': ['data/input_*/*']},
classifiers=[
'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4'],
)
# Try to use setuptools in order to check dependencies.
# if the system does not have setuptools, fall back on
# distutils.
try:
from setuptools import setup
metadata['install_requires'] = ['numpy', 'matplotlib', 'scipy', 'sympy']
except ImportError:
from distutils.core import setup
setup(**metadata)
| gpl-2.0 |
NYU-CS6313-Projects/Charts-for-CompStat | data/crash_cleaner.py | 1 | 3650 | #!/user/bin/python
# this python script cleans raw crash data and subsets the last n days of observations
# if n=-1 all rows of the raw dataset are kept
# WEEK and YEAR attributes are derived
import pandas as pd
import numpy as np
import datetime as dt
import re
import os
import logging
dpath = './'
def date_parser(ds):
if type(ds) == str:
return dt.datetime.date(dt.datetime.strptime(ds, "%m/%d/%Y"))
else:
return np.nan
def time_parser(ts):
if type(ts) == str:
return dt.datetime.time(dt.datetime.strptime(ts, "%H:%M"))
else:
return np.nan
#zip-s war by [email protected]
def zip_cleaner(s):
if type(s) != str:
return np.nan
elif re.match('^\d\d\d\d\d$', s):
return s
elif re.match('^\d\d\d\d\d-\d*$', s):
return re.sub('-\d*$', '', s)
else:
return np.nan
def test_zip_cleaner():
assert '12345' == zip_cleaner('12345')
assert '12345' == zip_cleaner('12345-1234')
assert np.isnan( zip_cleaner(np.nan) )
assert np.isnan( zip_cleaner('1234') )
assert np.isnan( zip_cleaner('0') )
assert np.isnan( zip_cleaner('UNKNOWN'))
# reads the raw crash data
def read_crash_csv(data):
df = pd.read_csv(data,
dtype={
'DATE' : str,
'TIME' : str,
'BOROUGH': str,
'ZIP CODE': str,
'LATITUDE': np.floating,
'LONGITUDE': np.floating,
'LOCATION' : str, # derived type
'ON STREET NAME' : str,
'CROSS STREET NAME': str,
'OFF STREET NAME' : str,
'NUMBER OF PERSONS INJURED' : np.integer,
'NUMBER OF PERSONS KILLED' : np.integer,
'NUMBER OF PEDESTRIANS INJURED' : np.integer,
'NUMBER OF PEDESTRIANS KILLED' : np.integer,
'NUMBER OF CYCLIST INJURED' : np.integer,
'NUMBER OF CYCLIST KILLED' : np.integer,
'NUMBER OF MOTORIST INJURED' : np.integer,
'NUMBER OF MOTORIST KILLED' : np.integer,
'CONTRIBUTING FACTOR VEHICLE 1' : str,
'CONTRIBUTING FACTOR VEHICLE 2' : str,
'CONTRIBUTING FACTOR VEHICLE 3' : str,
'CONTRIBUTING FACTOR VEHICLE 4' : str,
'CONTRIBUTING FACTOR VEHICLE 5' : str,
'UNIQUE KEY' : np.integer,
'VEHICLE TYPE CODE 1' : str,
'VEHICLE TYPE CODE 2' : str,
'VEHICLE TYPE CODE 3' : str,
'VEHICLE TYPE CODE 4' : str,
'VEHICLE TYPE CODE 5' : str})
df['DATE'] = map(date_parser, df['DATE'])
df['TIME'] = map(time_parser, df['TIME'])
df['LOCATION'] = zip(df.LATITUDE,df.LONGITUDE)
df['ZIP CODE'] = map(zip_cleaner,df['ZIP CODE'])
df['WEEK'] = df['DATE'].apply(lambda x: pd.to_datetime(x).week)
df['YEAR'] = df['DATE'].apply(lambda x: pd.to_datetime(x).year)
df.columns = [field.replace(" ","_") for field in df.columns]
return(df)
# subsets the last n days of the crash data and logs a number of records in the dataset
# no subseting if n=-1
def sample_crash_data(n,path,folder):
df = read_crash_csv(os.path.join(path,'crashdata.csv'))
logging.basicConfig(filename=os.path.join(path,'sample.log'),level=logging.DEBUG)
df_new = df
if n!=-1:
start = dt.date.today()
        logging.info('As of %s the raw data set contains %s records ...' % (start.strftime("%m/%d/%Y"), df.shape[0]))
end = dt.date.today()-dt.timedelta(days=n)
df_new = df[(df.DATE >= end) & (df.DATE <= start)]
df_new.to_csv(os.path.join(path,'%sdays_crashdata.csv' %(n)), index=False)
logging.info('Raw data set for the last %s days contains %s records' % (n, df_new.shape[0]))
else:
df_new.to_csv(os.path.join(path,'%srows_crashdata.csv' %(df_new.shape[0])), index=False)
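# Minimal sketch (illustrative only, toy data) of the date window applied by
# sample_crash_data when n != -1: keep only rows whose DATE falls within the
# last n days counted back from today.
def _demo_last_n_days_filter(n=7):
    today = dt.date.today()
    df = pd.DataFrame({'DATE': [today - dt.timedelta(days=d) for d in range(20)],
                       'UNIQUE_KEY': range(20)})
    window = df[(df.DATE >= today - dt.timedelta(days=n)) & (df.DATE <= today)]
    return window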
# n = 150; n =-1
if __name__ == "__main__":
sample_crash_data(150,dpath,'data')
sample_crash_data(-1,dpath,'data')
| mit |
hsuantien/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster. (A quick numerical check
of this scale-invariance appears right after the imports below.)
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
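# Quick numerical check of the scale-invariance claim in the docstring above:
# the cosine distance ignores a rescaling of a waveform, while the euclidean
# distance does not. A dedicated RandomState is used here so the
# np.random.seed(0) call below keeps its usual stream; the toy waveform is an
# arbitrary illustration.
_w = np.random.RandomState(42).rand(1, 50)
assert pairwise_distances(_w, 3 * _w, metric="cosine")[0, 0] < 1e-10
assert pairwise_distances(_w, 3 * _w, metric="euclidean")[0, 0] > 0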
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
laurensstoop/HiSPARC-BONZ | egg/legacy/egg_saskia_v4.2.py | 1 | 9204 | # -*- coding: utf-8 -*-
#
#################################################################################################
# #
# Program for analysing HiSPARC data #
# #
# This software is made under the GNU General Public License, version 3 (GPL-3.0) #
# #
#################################################################################################
"""
===================================
Created on Thu Mar 24 13:17:57 2016
@author: Laurens Stoop
===================================
"""
################################## HEADER ##################################
"""
Import of Packages
"""
import sapphire # The HiSparc Python Framework
import tables # A HDF5 python module that allows to store data
import datetime # A package to decode the timeformat of HiSparc data
import matplotlib.pyplot as plt # Plotting functionality of MatPlotLib
import numpy as np # This is NumPy
import os.path # To check if files exist (so you don't do stuff again)
import rootpy.plotting # Get the pythonesc version of ROOT
from rootpy.plotting import root2matplotlib
from matplotlib.colors import LogNorm
"""
Getting the data file and setting the variables
"""
# Time between which the data is downloaded (jjjj,mm,dd,[hh])
START = datetime.datetime(2015,01,01)
END = datetime.datetime(2016,01,01)
# Give the list of stations
STATIONS = [501,503,1006,1101,3001,13002,14001,20003]
# Do not show the figures
plt.ioff()
################################## BODY ##################################
"""
Data acquisition
"""
# Open a data file (automatic close)
with tables.open_file('egg_saskia.h5','a') as data_file:
# Retrieve for every station the data and plot a pulsehisto
for station in STATIONS:
# Set the station name (this is the group name in the file)
station_name = '/s%d' %station
# Data is downloaded
if station_name not in data_file:
# Let them know what we do
print "\nGetting event data from station %d " % station
# Now retrieve the event data
sapphire.esd.download_data(
data_file, # File (as opened above)
station_name, # Group name (/s..station..)
station, # Station number
START, # Start data date
END, # End data date
'events', # Download events (or 'weather')
True) # Show progress
# Let them know what we do
print "\nGetting wheater data from station %d " % station
# Now retrieve the wheater data
sapphire.esd.download_data(
data_file, # File (as opened above)
station_name, # Group name (/s..station..)
station, # Station number
START, # Start data date
END, # End data date
                'weather',              # Download weather
True) # Show progress
# If the datafile has the group we do not download them data
else:
print "All data present for station %d" % station
####### Pulseheight histograms #######
# If the plot exist we skip the plotting
if os.path.isfile('pulseheigt_histogram_%d.pdf' % station):
# Say if the plot is present
print "Plot already present for station %d" % station
# If there is no plot we make it
else:
# Get event data
event_data = data_file.get_node(
station_name, # From the group (/s..station..)
'events') # Get the node with events
# Set the figure
figure_pulse = plt.figure(station)
# Get the pulseheight from all events
data_pulseheight = event_data.col('pulseheights') # col takes all data from events
# Creates bins so that the ugly shit is taken away
bins = np.linspace(0, 4500, 201)
# Plotting the pulseheigth for all events
plt.hist(
data_pulseheight, # Plot the Pulseheight
bins, # Number of bins
histtype='step', # Make the histogram a step function
log=True) # With a logarithmic scale
# Setting the plot labels and title
plt.xlabel("Pulseheight [ADC]")
plt.ylabel("Counts")
plt.title("Pulseheight histogram (log scale) for station (%d)" %station)
# Saving them Pica
plt.savefig(
'pulseheigt_histogram_%d.pdf' % station, # Name of the file
bbox_inches='tight') # Use less whitespace
# Necessary to avoid multiplotting in one figure and to close memory leak
plt.close(figure_pulse)
####### Pulseheight vs pulse integral histograms #######
# If the plot exist we skip the plotting
if os.path.isfile('pmt_saturation_s%d.pdf' %station):
# Say if the plot is present
print "PMT saturation histogram already present for station %d" % station
# If there is no plot we make it
else:
# Get event data
event_data = data_file.get_node(
station_name, # From the group (/s..station..)
'events') # Get the node with events
# Get the pulseheight from all events
data_pulseheights = event_data.col('pulseheights') # col takes all data from events (this improves the speed)
# Get the integral from all events
data_integrals = event_data.col('integrals') # col takes all data from events
# Make a figure so it can be closed
figure_combo, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex = 'col', sharey = 'row')
# Setting the plot titles
ax1.set_title('Detector 1')
ax2.set_title('Detector 2')
ax3.set_title('Detector 3')
ax4.set_title('Detector 4')
# Setting the plot labels
ax1.set_ylabel('Pulseheight [ADC]')
ax3.set_ylabel('Pulseheight [ADC]')
ax3.set_xlabel('Pulse integral [ADC.ns]')
ax4.set_xlabel('Pulse integral [ADC.ns]')
# Now we plot the data of every detector
for detector in range(0,4):
# Select the detector data
data_pulseheight_detector = data_pulseheights[:,detector]
data_integral_detector = data_integrals[:,detector]
# Combine the detector data
data_combo = np.stack(
(data_integral_detector, # The pulse integral on y axis
data_pulseheight_detector), # The pulseheight on x axis
axis=-1) # To get the direction correct
# Initiate a 2D histogram (ROOT style)
histogram_combo_detector = rootpy.plotting.Hist2D(100, 0, 150000, 100, 0, 4500)
# Fill the Histogram
histogram_combo_detector.fill_array(data_combo)
# Plot the histogram with logarithmic colors in correct place
if detector == 0:
root2matplotlib.hist2d(histogram_combo_detector, norm=LogNorm(), axes=ax1)
elif detector == 1:
root2matplotlib.hist2d(histogram_combo_detector, norm=LogNorm(), axes=ax2)
elif detector == 2:
root2matplotlib.hist2d(histogram_combo_detector, norm=LogNorm(), axes=ax3)
elif detector == 3:
root2matplotlib.hist2d(histogram_combo_detector, norm=LogNorm(), axes=ax4)
# Save the file
figure_combo.savefig(
'pmt_saturation_s%d.pdf' %station) # Name of the file
# Close the figure
plt.close(figure_combo)
            # the for-loop over the detectors advances automatically, so no manual increment is needed
print "####### I'm Done Bitches! #######"
################################## FOOTER ##################################
"""
Clean up shit
"""
| gpl-3.0 |
ryanraaum/african-mtdna | popdata_sources/lippold2014/process.py | 1 | 1543 | from oldowan.mtconvert import seq2sites, sites2seq, str2sites
from oldowan.fasta import fasta
from string import translate
import pandas as pd
import sys
sys.path.append('../../scripts')
from utils import *
## load metadata
metadata = pd.read_csv('metadata.csv', index_col=0)
region = range2region(metadata.ix[0, 'SeqRange'])
## load sample info
sinfo = pd.read_csv('HGDP_info.csv', index_col=0)
newindices = ['HGDP' + str(x).zfill(5) for x in sinfo.index]
sinfo['hgdpid'] = newindices
sinfo = sinfo.set_index('hgdpid')
ff = fasta('hgdp_africa.fasta', 'r')
data = ff.readentries()
ff.close()
hids = []
sites = []
for entry in data:
words = entry['name'].split()
hids.append(words[4])
sites.append(seq2sites(entry['sequence']))
# three sequences have an 'N' at around 309 that breaks validation
# this will be treated as a heteroplasmy of the T there and ignored
skip = [64, 67, 73]
# validate
passed_validation = True
for i in range(len(sites)):
if i not in skip:
seq1 = data[i]['sequence'].upper()
if not seq1 == translate(sites2seq(sites[i], region), None, '-'):
passed_validation = False
print i, hids[i]
counter = {}
for k in metadata.index:
counter[k] = 0
if passed_validation:
with open('processed.csv', 'w') as f:
for i in range(len(sites)):
hid = hids[i]
key = sinfo.ix[hid,'PopulationName']
prefix = metadata.ix[key,'NewPrefix']
counter[key] += 1
newid = prefix + str(counter[key]).zfill(3)
mysites = ' '.join([str(x) for x in sites[i]])
f.write('%s,%s,%s\n' % (newid, hid, mysites)) | cc0-1.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/scipy/signal/ltisys.py | 7 | 116413 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Apr 2011: Jeffrey Armstrong <[email protected]>
# Added dlsim, dstep, dimpulse, cont2discrete
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
# Merged discrete systems and added dlti
import warnings
# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
# use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
from scipy import integrate, interpolate, linalg
from scipy.interpolate import interp1d
from scipy._lib.six import xrange
from .filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk,
freqz_zpk)
from .lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk,
cont2discrete)
import numpy
import numpy as np
from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
'dfreqresp', 'dbode']
class LinearTimeInvariant(object):
def __new__(cls, *system, **kwargs):
"""Create a new object, don't allow direct instances."""
if cls is LinearTimeInvariant:
raise NotImplementedError('The LinearTimeInvariant class is not '
'meant to be used directly, use `lti` '
'or `dlti` instead.')
return super(LinearTimeInvariant, cls).__new__(cls)
def __init__(self):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
super(LinearTimeInvariant, self).__init__()
self.inputs = None
self.outputs = None
self._dt = None
@property
def dt(self):
"""Return the sampling time of the system, `None` for `lti` systems."""
return self._dt
@property
def _dt_dict(self):
if self.dt is None:
return {}
else:
return {'dt': self.dt}
@property
def zeros(self):
"""Zeros of the system."""
return self.to_zpk().zeros
@property
def poles(self):
"""Poles of the system."""
return self.to_zpk().poles
def _as_ss(self):
"""Convert to `StateSpace` system, without copying.
Returns
-------
sys: StateSpace
The `StateSpace` system. If the class is already an instance of
`StateSpace` then this instance is returned.
"""
if isinstance(self, StateSpace):
return self
else:
return self.to_ss()
def _as_zpk(self):
"""Convert to `ZerosPolesGain` system, without copying.
Returns
-------
sys: ZerosPolesGain
The `ZerosPolesGain` system. If the class is already an instance of
`ZerosPolesGain` then this instance is returned.
"""
if isinstance(self, ZerosPolesGain):
return self
else:
return self.to_zpk()
def _as_tf(self):
"""Convert to `TransferFunction` system, without copying.
Returns
-------
        sys: TransferFunction
The `TransferFunction` system. If the class is already an instance of
`TransferFunction` then this instance is returned.
"""
if isinstance(self, TransferFunction):
return self
else:
return self.to_tf()
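# A minimal sketch of how the conversion helpers above are typically exercised
# (illustrative only; the concrete classes are defined further down in this
# module): the same first-order low-pass system viewed as a transfer function,
# as zeros/poles/gain and as state space.
def _example_representation_conversion():
    sys_tf = TransferFunction([1], [1, 1])   # 1 / (s + 1)
    sys_zpk = sys_tf.to_zpk()                # no zeros, a pole at -1, gain 1
    sys_ss = sys_tf.to_ss()                  # 1x1 A, B, C, D matrices
    return sys_tf, sys_zpk, sys_ss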
class lti(LinearTimeInvariant):
"""
Continuous-time linear time invariant system base class.
Parameters
----------
*system : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
continuous-time subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
See Also
--------
ZerosPolesGain, StateSpace, TransferFunction, dlti
Notes
-----
`lti` instances do not exist directly. Instead, `lti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
If (numerator, denominator) is passed in for ``*system``, coefficients for
both the numerator and denominator should be specified in descending
exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3,
5]``).
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> signal.lti(1, 2, 3, 4)
StateSpaceContinuous(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: None
)
>>> signal.lti([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
>>> signal.lti([3, 4], [1, 2])
TransferFunctionContinuous(
array([ 3., 4.]),
array([ 1., 2.]),
dt: None
)
"""
def __new__(cls, *system):
"""Create an instance of the appropriate subclass."""
if cls is lti:
N = len(system)
if N == 2:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous, *system)
elif N == 3:
return ZerosPolesGainContinuous.__new__(
ZerosPolesGainContinuous, *system)
elif N == 4:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system)
else:
raise ValueError("`system` needs to be an instance of `lti` "
"or have 2, 3 or 4 arguments.")
# __new__ was called from a subclass, let it call its own functions
return super(lti, cls).__new__(cls)
def __init__(self, *system):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
super(lti, self).__init__(*system)
def impulse(self, X0=None, T=None, N=None):
"""
Return the impulse response of a continuous-time system.
See `impulse` for details.
"""
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
"""
Return the step response of a continuous-time system.
See `step` for details.
"""
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
"""
Return the response of a continuous-time system to input `U`.
See `lsim` for details.
"""
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `bode` for details.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sys = signal.TransferFunction([1], [1, 1])
>>> w, mag, phase = sys.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""
Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `freqresp` for details.
"""
return freqresp(self, w=w, n=n)
def to_discrete(self, dt, method='zoh', alpha=None):
"""Return a discretized version of the current system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti`
"""
raise NotImplementedError('to_discrete is not implemented for this '
'system class.')
class dlti(LinearTimeInvariant):
"""
Discrete-time linear time invariant system base class.
Parameters
----------
*system: arguments
The `dlti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
discrete-time subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to ``True``
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, TransferFunction, lti
Notes
-----
`dlti` instances do not exist directly. Instead, `dlti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
If (numerator, denominator) is passed in for ``*system``, coefficients for
both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3,
5]``).
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy import signal
>>> signal.dlti(1, 2, 3, 4)
StateSpaceDiscrete(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: True
)
>>> signal.dlti(1, 2, 3, 4, dt=0.1)
StateSpaceDiscrete(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: 0.1
)
>>> signal.dlti([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
>>> signal.dlti([3, 4], [1, 2], dt=0.1)
TransferFunctionDiscrete(
array([ 3., 4.]),
array([ 1., 2.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Create an instance of the appropriate subclass."""
if cls is dlti:
N = len(system)
if N == 2:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete, *system, **kwargs)
elif N == 3:
return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete,
*system, **kwargs)
elif N == 4:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system,
**kwargs)
else:
raise ValueError("`system` needs to be an instance of `dlti` "
"or have 2, 3 or 4 arguments.")
# __new__ was called from a subclass, let it call its own functions
return super(dlti, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""
Initialize the `dlti` baseclass.
The heavy lifting is done by the subclasses.
"""
dt = kwargs.pop('dt', True)
super(dlti, self).__init__(*system, **kwargs)
self.dt = dt
@property
def dt(self):
"""Return the sampling time of the system."""
return self._dt
@dt.setter
def dt(self, dt):
self._dt = dt
def impulse(self, x0=None, t=None, n=None):
"""
Return the impulse response of the discrete-time `dlti` system.
See `dimpulse` for details.
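Examples
--------
An illustrative sketch using an arbitrary first-order discrete-time
system (coefficients and ``dt`` chosen only for illustration):
>>> from scipy import signal
>>> sys = signal.dlti([1], [1, -0.5], dt=0.1)
>>> t, y = sys.impulse(n=25)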
"""
return dimpulse(self, x0=x0, t=t, n=n)
def step(self, x0=None, t=None, n=None):
"""
Return the step response of the discrete-time `dlti` system.
See `dstep` for details.
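Examples
--------
An illustrative sketch using an arbitrary first-order discrete-time
system (coefficients and ``dt`` chosen only for illustration):
>>> from scipy import signal
>>> sys = signal.dlti([1], [1, -0.5], dt=0.1)
>>> t, y = sys.step(n=25)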
"""
return dstep(self, x0=x0, t=t, n=n)
def output(self, u, t, x0=None):
"""
Return the response of the discrete-time system to input `u`.
See `dlsim` for details.
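Examples
--------
An illustrative sketch driving an arbitrary first-order discrete-time
system with a unit input (all values chosen only for illustration):
>>> import numpy as np
>>> from scipy import signal
>>> sys = signal.dlti([1], [1, -0.5], dt=0.5)
>>> t = np.arange(10) * 0.5
>>> u = np.ones(10)
>>> tout, y = sys.output(u, t)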
"""
return dlsim(self, u, t, x0=x0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a discrete-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `dbode` for details.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(z) = 1 / (z^2 + 2z + 3) with sampling time 0.5s
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5)
Equivalent: signal.dbode(sys)
>>> w, mag, phase = sys.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return dbode(self, w=w, n=n)
def freqresp(self, w=None, n=10000, whole=False):
"""
Calculate the frequency response of a discrete-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `dfreqresp` for details.
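Examples
--------
An illustrative sketch using an arbitrary first-order discrete-time
system:
>>> from scipy import signal
>>> sys = signal.dlti([1], [1, -0.5], dt=0.1)
>>> w, H = sys.freqresp()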
"""
return dfreqresp(self, w=w, n=n, whole=whole)
class TransferFunction(LinearTimeInvariant):
r"""Linear Time Invariant system class in transfer function form.
Represents the system as the continuous-time transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
discrete-time transfer function
:math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
`TransferFunction` systems inherit additional functionality from the
`lti` or `dlti` classes, depending on which system representation is used.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, lti, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be
represented as ``[1, 3, 5]``)
Examples
--------
Construct the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: None
)
Construct the transfer function with a sampling time of 0.1 seconds:
.. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}
>>> signal.TransferFunction(num, den, dt=0.1)
TransferFunctionDiscrete(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of lti."""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_tf()
# Choose whether to inherit from `lti` or from `dlti`
if cls is TransferFunction:
if kwargs.get('dt') is None:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous,
*system,
**kwargs)
else:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete,
*system,
**kwargs)
# No special conversion needed
return super(TransferFunction, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super(TransferFunction, self).__init__(**kwargs)
self._num = None
self._den = None
self.num, self.den = normalize(*system)
def __repr__(self):
"""Return representation of the system's transfer function"""
return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format(
self.__class__.__name__,
repr(self.num),
repr(self.den),
repr(self.dt),
)
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
return self._num
@num.setter
def num(self, num):
self._num = atleast_1d(num)
# Update dimensions
if len(self.num.shape) > 1:
self.outputs, self.inputs = self.num.shape
else:
self.outputs = 1
self.inputs = 1
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
return self._den
@den.setter
def den(self, den):
self._den = atleast_1d(den)
def _copy(self, system):
"""
Copy the parameters of another `TransferFunction` object
Parameters
----------
system : `TransferFunction`
The `TransferFunction` system that is to be copied
"""
self.num = system.num
self.den = system.den
def to_tf(self):
"""
Return a copy of the current `TransferFunction` system.
Returns
-------
sys : instance of `TransferFunction`
The current system (copy)
"""
return copy.deepcopy(self)
def to_zpk(self):
"""
Convert system representation to `ZerosPolesGain`.
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*tf2zpk(self.num, self.den),
**self._dt_dict)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*tf2ss(self.num, self.den),
**self._dt_dict)
@staticmethod
def _z_to_zinv(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((np.zeros(diff), den))
elif diff < 0:
num = np.hstack((np.zeros(-diff), num))
return num, den
@staticmethod
def _zinv_to_z(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((den, np.zeros(diff)))
elif diff < 0:
num = np.hstack((num, np.zeros(-diff)))
return num, den
class TransferFunctionContinuous(TransferFunction, lti):
r"""
Continuous-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Continuous-time `TransferFunction` systems inherit additional
functionality from the `lti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
See Also
--------
ZerosPolesGain, StateSpace, lti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``)
Examples
--------
Construct the transfer function:
.. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `TransferFunction` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `TransferFunction`
"""
return TransferFunction(*cont2discrete((self.num, self.den),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class TransferFunctionDiscrete(TransferFunction, dlti):
r"""
Discrete-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Discrete-time `TransferFunction` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as
``[1, 3, 5]``).
Examples
--------
Construct the transfer function with a sampling time of 0.5 seconds:
.. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den, 0.5)
TransferFunctionDiscrete(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: 0.5
)
"""
pass
class ZerosPolesGain(LinearTimeInvariant):
r"""
Linear Time Invariant system class in zeros, poles, gain form.
Represents the system as the continuous- or discrete-time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
`ZerosPolesGain` systems inherit additional functionality from the `lti`
or `dlti` classes, depending on which system representation is used.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, StateSpace, lti, dlti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
>>> from scipy import signal
Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_zpk()
# Choose whether to inherit from `lti` or from `dlti`
if cls is ZerosPolesGain:
if kwargs.get('dt') is None:
return ZerosPolesGainContinuous.__new__(
ZerosPolesGainContinuous,
*system,
**kwargs)
else:
return ZerosPolesGainDiscrete.__new__(
ZerosPolesGainDiscrete,
*system,
**kwargs
)
# No special conversion needed
return super(ZerosPolesGain, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the zeros, poles, gain system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
super(ZerosPolesGain, self).__init__(**kwargs)
self._zeros = None
self._poles = None
self._gain = None
self.zeros, self.poles, self.gain = system
def __repr__(self):
"""Return representation of the `ZerosPolesGain` system."""
return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format(
self.__class__.__name__,
repr(self.zeros),
repr(self.poles),
repr(self.gain),
repr(self.dt),
)
@property
def zeros(self):
"""Zeros of the `ZerosPolesGain` system."""
return self._zeros
@zeros.setter
def zeros(self, zeros):
self._zeros = atleast_1d(zeros)
# Update dimensions
if len(self.zeros.shape) > 1:
self.outputs, self.inputs = self.zeros.shape
else:
self.outputs = 1
self.inputs = 1
@property
def poles(self):
"""Poles of the `ZerosPolesGain` system."""
return self._poles
@poles.setter
def poles(self, poles):
self._poles = atleast_1d(poles)
@property
def gain(self):
"""Gain of the `ZerosPolesGain` system."""
return self._gain
@gain.setter
def gain(self, gain):
self._gain = gain
def _copy(self, system):
"""
Copy the parameters of another `ZerosPolesGain` system.
Parameters
----------
system : instance of `ZerosPolesGain`
The zeros, poles, gain system that is to be copied
"""
self.poles = system.poles
self.zeros = system.zeros
self.gain = system.gain
def to_tf(self):
"""
Convert system representation to `TransferFunction`.
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain),
**self._dt_dict)
def to_zpk(self):
"""
Return a copy of the current 'ZerosPolesGain' system.
Returns
-------
sys : instance of `ZerosPolesGain`
The current system (copy)
"""
return copy.deepcopy(self)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain),
**self._dt_dict)
class ZerosPolesGainContinuous(ZerosPolesGain, lti):
r"""
Continuous-time Linear Time Invariant system in zeros, poles, gain form.
Represents the system as the continuous-time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Continuous-time `ZerosPolesGain` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
See Also
--------
TransferFunction, StateSpace, lti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
>>> from scipy import signal
Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `ZerosPolesGain` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `ZerosPolesGain`
"""
return ZerosPolesGain(
*cont2discrete((self.zeros, self.poles, self.gain),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class ZerosPolesGainDiscrete(ZerosPolesGain, dlti):
r"""
Discrete-time Linear Time Invariant system in zeros, poles, gain form.
Represents the system as the discrete-time transfer function
:math:`H(z)=k \prod_i (z - z[i]) / \prod_j (z - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Discrete-time `ZerosPolesGain` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
TransferFunction, StateSpace, dlti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
>>> from scipy import signal
Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4)
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
"""
pass
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
class StateSpace(LinearTimeInvariant):
r"""
Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u` or the discrete-time difference
equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems
inherit additional functionality from the `lti` or `dlti` classes,
depending on which system representation is used.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, lti, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
>>> sys.to_discrete(0.1)
StateSpaceDiscrete(
array([[ 1. , 0.1],
[ 0. , 1. ]]),
array([[ 0.005],
[ 0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[ 1. , 0.1],
[ 0. , 1. ]]),
array([[ 0.005],
[ 0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Create new StateSpace object and settle inheritance."""
# Handle object conversion if input is an instance of `lti`
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_ss()
# Choose whether to inherit from `lti` or from `dlti`
if cls is StateSpace:
if kwargs.get('dt') is None:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system, **kwargs)
else:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete,
*system, **kwargs)
# No special conversion needed
return super(StateSpace, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space lti/dlti system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super(StateSpace, self).__init__(**kwargs)
self._A = None
self._B = None
self._C = None
self._D = None
self.A, self.B, self.C, self.D = abcd_normalize(*system)
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
repr(self.dt),
)
@property
def A(self):
"""State matrix of the `StateSpace` system."""
return self._A
@A.setter
def A(self, A):
self._A = _atleast_2d_or_none(A)
@property
def B(self):
"""Input matrix of the `StateSpace` system."""
return self._B
@B.setter
def B(self, B):
self._B = _atleast_2d_or_none(B)
self.inputs = self.B.shape[-1]
@property
def C(self):
"""Output matrix of the `StateSpace` system."""
return self._C
@C.setter
def C(self, C):
self._C = _atleast_2d_or_none(C)
self.outputs = self.C.shape[0]
@property
def D(self):
"""Feedthrough matrix of the `StateSpace` system."""
return self._D
@D.setter
def D(self, D):
self._D = _atleast_2d_or_none(D)
def _copy(self, system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
self.A = system.A
self.B = system.B
self.C = system.C
self.D = system.D
def to_tf(self, **kwargs):
"""
Convert system representation to `TransferFunction`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2tf`
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_zpk(self, **kwargs):
"""
Convert system representation to `ZerosPolesGain`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_ss(self):
"""
Return a copy of the current `StateSpace` system.
Returns
-------
sys : instance of `StateSpace`
The current system (copy)
"""
return copy.deepcopy(self)
class StateSpaceContinuous(StateSpace, lti):
r"""
Continuous-time Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u`.
Continuous-time `StateSpace` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
See Also
--------
TransferFunction, ZerosPolesGain, lti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `StateSpace` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `StateSpace`
"""
return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class StateSpaceDiscrete(StateSpace, dlti):
r"""
Discrete-time Linear Time Invariant system in state-space form.
Represents the system as the discrete-time difference equation
:math:`x[k+1] = A x[k] + B u[k]`.
`StateSpace` systems inherit additional functionality from the `dlti`
class.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[ 1. , 0.1],
[ 0. , 1. ]]),
array([[ 0.005],
[ 0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
pass
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the `lti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
T : array_like (1D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
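Examples
--------
An illustrative sketch simulating the step response of the first-order
lag ``H(s) = 1 / (s + 1)`` (system and input are arbitrary choices):
>>> import numpy as np
>>> from scipy import signal
>>> system = signal.lti([1], [1, 1])
>>> t = np.linspace(0, 5, 101)
>>> u = np.ones_like(t)
>>> tout, y, x = signal.lsim2(system, U=u, T=t)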
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('lsim2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.floating):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U = 0 or None, a zero input is used.
T : array_like
The time steps at which the input is defined and at which the
output is desired. Must be nonnegative, increasing, and equally spaced.
X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : bool, optional
Whether to use linear (True, the default) or zero-order-hold (False)
interpolation for the input array.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time evolution of the state vector.
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
Simulate a double integrator y'' = u, with a constant input u = 1
>>> from scipy import signal
>>> system = signal.lti([[0., 1.], [0., 0.]], [[0.], [1.]], [[1., 0.]], 0.)
>>> t = np.linspace(0, 5)
>>> u = np.ones_like(t)
>>> tout, y, x = signal.lsim(system, u, t)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('lsim can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
n_states = A.shape[0]
n_inputs = B.shape[1]
n_steps = T.size
if X0 is None:
X0 = zeros(n_states, sys.A.dtype)
xout = zeros((n_steps, n_states), sys.A.dtype)
if T[0] == 0:
xout[0] = X0
elif T[0] > 0:
# step forward to initial time, with zero input
xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
else:
raise ValueError("Initial time must be nonnegative")
no_input = (U is None or
(isinstance(U, (int, float)) and U == 0.) or
not np.any(U))
if n_steps == 1:
yout = squeeze(dot(xout, transpose(C)))
if not no_input:
yout += squeeze(dot(U, transpose(D)))
return T, squeeze(yout), squeeze(xout)
dt = T[1] - T[0]
if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
warnings.warn("Non-uniform timesteps are deprecated. Results may be "
"slow and/or inaccurate.", DeprecationWarning)
return lsim2(system, U, T, X0)
if no_input:
# Zero input: just use matrix exponential
# take transpose because state is a row vector
expAT_dt = linalg.expm(transpose(A) * dt)
for i in xrange(1, n_steps):
xout[i] = dot(xout[i-1], expAT_dt)
yout = squeeze(dot(xout, transpose(C)))
return T, squeeze(yout), squeeze(xout)
# Nonzero input
U = atleast_1d(U)
if U.ndim == 1:
U = U[:, np.newaxis]
if U.shape[0] != n_steps:
raise ValueError("U must have the same number of rows "
"as elements in T.")
if U.shape[1] != n_inputs:
raise ValueError("System does not define that many inputs.")
if not interp:
# Zero-order hold
# Algorithm: to integrate from time 0 to time dt, we solve
# xdot = A x + B u, x(0) = x0
# udot = 0, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 ] [ u0 ]
M = np.vstack([np.hstack([A * dt, B * dt]),
np.zeros((n_inputs, n_states + n_inputs))])
# transpose everything because the state and input are row vectors
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd = expMT[n_states:, :n_states]
for i in xrange(1, n_steps):
xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
else:
# Linear interpolation between steps
# Algorithm: to integrate from time 0 to time dt, with linear
# interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
# xdot = A x + B u, x(0) = x0
# udot = (u1 - u0) / dt, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 I ] [ u0 ]
# [u1 - u0] [ 0 0 0 ] [u1 - u0]
M = np.vstack([np.hstack([A * dt, B * dt,
np.zeros((n_states, n_inputs))]),
np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
np.identity(n_inputs)]),
np.zeros((n_inputs, n_states + 2 * n_inputs))])
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd1 = expMT[n_states+n_inputs:, :n_states]
Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
for i in xrange(1, n_steps):
xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : array_like
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
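Examples
--------
An illustrative sketch using a second-order system with a repeated
root, ``x''(t) + 2*x'(t) + x(t) = u(t)`` (an arbitrary example):
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse(system)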
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('impulse can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if X0 is None:
X = squeeze(sys.B)
else:
X = squeeze(sys.B + X0)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
_, h, _ = lsim(sys, 0., T, X, interp=False)
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.8.0
Examples
--------
Second order system with a repeated root: x''(t) + 2*x'(t) + x(t) = u(t)
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('impulse2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
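Examples
--------
An illustrative sketch using the first-order lag ``H(s) = 1 / (s + 1)``
(an arbitrary example system):
>>> from scipy import signal
>>> t, y = signal.step(([1.0], [1.0, 1.0]))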
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('step can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0, interp=False)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
kwargs : various types
Additional keyword arguments are passed on the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`. See the documentation for
`scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.8.0
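Examples
--------
The call signature mirrors `step`; an illustrative sketch with the
arbitrary first-order lag ``H(s) = 1 / (s + 1)``:
>>> from scipy import signal
>>> t, y = signal.step2(([1.0], [1.0, 1.0]))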
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('step2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is calculated
for every value in this array. If not given, a reasonable set will be
calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sys = signal.TransferFunction([1], [1, 1])
>>> w, mag, phase = signal.bode(sys)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
def freqresp(system, w=None, n=10000):
"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the `lti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is
calculated for every value in this array. If not given, a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(s) = 5 / (s-1)^3
>>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5])
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
if isinstance(system, (TransferFunction, ZerosPolesGain)):
sys = system
else:
sys = system._as_zpk()
elif isinstance(system, dlti):
raise AttributeError('freqresp can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_zpk()
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
if isinstance(sys, TransferFunction):
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
elif isinstance(sys, ZerosPolesGain):
w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)
return w, h
# This class will be used by place_poles to return its results
# see http://code.activestate.com/recipes/52308/
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def _valid_inputs(A, B, poles, method, rtol, maxiter):
"""
Check the poles come in complex conjugate pairs.
Check shapes of A, B and poles are compatible.
Check the method chosen is compatible with provided poles.
Return update method to use and ordered poles.
"""
poles = np.asarray(poles)
if poles.ndim > 1:
raise ValueError("Poles must be a 1D array like.")
# Will raise ValueError if poles do not come in complex conjugate pairs
poles = _order_complex_poles(poles)
if A.ndim > 2:
raise ValueError("A must be a 2D array/matrix.")
if B.ndim > 2:
raise ValueError("B must be a 2D array/matrix")
if A.shape[0] != A.shape[1]:
raise ValueError("A must be square")
if len(poles) > A.shape[0]:
raise ValueError("maximum number of poles is %d but you asked for %d" %
(A.shape[0], len(poles)))
if len(poles) < A.shape[0]:
raise ValueError("number of poles is %d but you should provide %d" %
(len(poles), A.shape[0]))
r = np.linalg.matrix_rank(B)
for p in poles:
if sum(p == poles) > r:
raise ValueError("at least one of the requested pole is repeated "
"more than rank(B) times")
# Choose update method
update_loop = _YT_loop
if method not in ('KNV0','YT'):
raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
if method == "KNV0":
update_loop = _KNV0_loop
if not all(np.isreal(poles)):
raise ValueError("Complex poles are not supported by KNV0")
if maxiter < 1:
raise ValueError("maxiter must be at least equal to 1")
# We do not check rtol <= 0 as the user can use a negative rtol to
# force maxiter iterations
if rtol > 1:
raise ValueError("rtol can not be greater than 1")
return update_loop, poles
def _order_complex_poles(poles):
"""
Check we have complex conjugate pairs and reorder P according to YT, i.e.
real_poles, complex_i, conjugate complex_i, ....
The lexicographic sort on the complex poles is added to help the user to
compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
"""
Algorithm "KNV0" Kautsky et Al. Robust pole
assignment in linear state feedback, Int journal of Control
1985, vol 41 p 1129->1155
http://la.epfl.ch/files/content/sites/la/files/
users/105941/public/KautskyNicholsDooren
"""
# Remove xj from the basis
transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
# If we compute the QR decomposition of this matrix in full mode, Q=Q0|Q1,
# then Q1 will be a single column orthogonal to
# Q0, which is what we are looking for!
# After merge of gh-4249 great speed improvements could be achieved
# using QR updates instead of full QR in the line below
# To debug with numpy qr uncomment the line below
# Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
Q, R = s_qr(transfer_matrix_not_j, mode="full")
mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
yj = np.dot(mat_ker_pj, Q[:, -1])
# If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
# projection into ker_pole[j] will yield a vector
# close to 0. As we are looking for a vector in ker_pole[j],
# simply stick with transfer_matrix[:, j] (unless a better
# choice is found)
if not np.allclose(yj, 0):
xj = yj/np.linalg.norm(yj)
transfer_matrix[:, j] = xj
# KNV does not support complex poles; using the YT technique, the two lines
# below seem to work 9 times out of 10, but this is not reliable enough:
# transfer_matrix[:, j]=real(xj)
# transfer_matrix[:, j+1]=imag(xj)
# Add this at the beginning of this function if you wish to test
# complex support:
# if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
# return
# Problems arise when imag(xj) is close to 0; it is not clear how to fix this
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.1 page 19 related to real pairs
"""
# step 1 page 19
u = Q[:, -2, np.newaxis]
v = Q[:, -1, np.newaxis]
# step 2 page 19
m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
np.dot(v, u.T)), ker_pole[j])
# step 3 page 19
um, sm, vm = np.linalg.svd(m)
# mu1, mu2: the first two columns of U => the first two rows of U.T
mu1, mu2 = um.T[:2, :, np.newaxis]
# vm is V.T; with numpy we want the first two rows of V.T
nu1, nu2 = vm[:2, :, np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
transfer_matrix[:, i, np.newaxis],
transfer_matrix[:, j, np.newaxis]))
if not np.allclose(sm[0], sm[1]):
ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
else:
ker_pole_ij = np.vstack((
np.hstack((ker_pole[i],
np.zeros(ker_pole[i].shape))),
np.hstack((np.zeros(ker_pole[j].shape),
ker_pole[j]))
))
mu_nu_matrix = np.vstack(
(np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
)
ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_ij, 0):
transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
np.linalg.norm(transfer_matrix_ij))
transfer_matrix[:, i] = transfer_matrix_ij[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = transfer_matrix_ij[
transfer_matrix[:, i].shape[0]:, 0
]
else:
# As in _KNV0, if transfer_matrix_j_mo_transfer_matrix_j is orthogonal
# to Vect{ker_pole_mu_nu}, assign transfer_matrix_i/transfer_matrix_j
# to ker_pole_mu_nu and iterate. As we are looking for a vector in
# Vect{ker_pole_mu_nu} (see section 6.1 page 19) this might help
# (that is a guess, not a claim).
transfer_matrix[:, i] = ker_pole_mu_nu[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = ker_pole_mu_nu[
transfer_matrix[:, i].shape[0]:, 0
]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.2 page 20 related to complex pairs
"""
# step 1 page 20
ur = np.sqrt(2)*Q[:, -2, np.newaxis]
ui = np.sqrt(2)*Q[:, -1, np.newaxis]
u = ur + 1j*ui
# step 2 page 20
ker_pole_ij = ker_pole[i]
m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
np.dot(np.conj(u), u.T)), ker_pole_ij)
# step 3 page 20
e_val, e_vec = np.linalg.eig(m)
# sort eigenvalues according to their modulus
e_val_idx = np.argsort(np.abs(e_val))
mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
# remember transfer_matrix_i has been split as
# transfer_matrix[i]=real(transfer_matrix_i) and
# transfer_matrix[j]=imag(transfer_matrix_i)
transfer_matrix_j_mo_transfer_matrix_j = (
transfer_matrix[:, i, np.newaxis] +
1j*transfer_matrix[:, j, np.newaxis]
)
if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
np.abs(e_val[e_val_idx[-2]])):
ker_pole_mu = np.dot(ker_pole_ij, mu1)
else:
mu1_mu2_matrix = np.hstack((mu1, mu2))
ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_i_j, 0):
transfer_matrix_i_j = (transfer_matrix_i_j /
np.linalg.norm(transfer_matrix_i_j))
transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
else:
# same idea as in YT_real
transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Algorithm "YT" Tits, Yang. Globally Convergent
Algorithms for Robust Pole Assignment by State Feedback
http://drum.lib.umd.edu/handle/1903/5598
The poles P have to be sorted according to section 6.2 page 20
"""
# The IEEE edition of the YT paper gives useful information on the
# optimal update order for the real poles in order to minimize the number
# of times we have to loop over all poles, see page 1442
nb_real = poles[np.isreal(poles)].shape[0]
# hnb => Half Nb Real
hnb = nb_real // 2
# Stick to the indices in the paper and then subtract one to get numpy
# array indices; it is a bit easier to link the code to the paper this
# way, even if it is not very clean. The paper is unclear about what
# should be done when there is only one real pole => using KNV0 on this
# real pole seems to work.
if nb_real > 0:
# update the biggest real pole with the smallest one
update_order = [[nb_real], [1]]
else:
update_order = [[],[]]
r_comp = np.arange(nb_real+1, len(poles)+1, 2)
# step 1.a
r_p = np.arange(1, hnb+nb_real % 2)
update_order[0].extend(2*r_p)
update_order[1].extend(2*r_p+1)
# step 1.b
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 1.c
r_p = np.arange(1, hnb+1)
update_order[0].extend(2*r_p-1)
update_order[1].extend(2*r_p)
# step 1.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.a
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+j)
# step 2.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.c
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(hnb+1, nb_real+1):
idx_1 = i+j
if idx_1 > nb_real:
idx_1 = i+j-nb_real
update_order[0].append(i)
update_order[1].append(idx_1)
# step 2.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 3.a
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+hnb)
# step 3.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
update_order = np.array(update_order).T-1
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for i, j in update_order:
if i == j:
assert i == 0, "i!=0 for KNV call in YT"
assert np.isreal(poles[i]), "calling KNV on a complex pole"
_KNV0(B, ker_pole, transfer_matrix, i, poles)
else:
transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
axis=1)
# after merge of gh-4249 great speed improvements could be
# achieved using QR updates instead of full QR in the line below
# to debug with numpy qr uncomment the line below
# Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
if np.isreal(poles[i]):
assert np.isreal(poles[j]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_real(ker_pole, Q, transfer_matrix, i, j)
else:
assert ~np.isreal(poles[i]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_complex(ker_pole, Q, transfer_matrix, i, j)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs(
(det_transfer_matrix -
det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Loop over all poles one by one and apply KNV method 0 algorithm
"""
# This method is useful only because we need to be able to call
# _KNV0 from YT without looping over all poles, otherwise it would
# have been fine to mix _KNV0_loop and _KNV0 in a single function
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for j in range(B.shape[0]):
_KNV0(B, ker_pole, transfer_matrix, j, poles)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
"""
Compute K such that eigenvalues (A - dot(B, K))=poles.
K is the gain matrix such that the plant described by the linear system
``AX+BU`` will have its closed-loop poles, i.e. the eigenvalues of
``A - B*K``, as close as possible to those asked for in poles.
SISO, MISO and MIMO systems are supported.
Parameters
----------
A, B : ndarray
State-space representation of linear system ``AX + BU``.
poles : array_like
Desired real poles and/or complex conjugates poles.
Complex poles are only supported with ``method="YT"`` (default).
method: {'YT', 'KNV0'}, optional
Which method to choose to find the gain matrix K. One of:
- 'YT': Yang Tits
- 'KNV0': Kautsky, Nichols, Van Dooren update method 0
See References and Notes for details on the algorithms.
rtol: float, optional
After each iteration the determinant of the eigenvectors of
``A - B*K`` is compared to its previous value, when the relative
error between these two values becomes lower than `rtol` the algorithm
stops. Default is 1e-3.
maxiter: int, optional
Maximum number of iterations to compute the gain matrix.
Default is 30.
Returns
-------
full_state_feedback : Bunch object
full_state_feedback is composed of:
gain_matrix : 1-D ndarray
The closed loop matrix K such that the eigenvalues of ``A-BK``
are as close as possible to the requested poles.
computed_poles : 1-D ndarray
The poles corresponding to ``A-BK`` sorted as first the real
poles in increasing order, then the complex conjugates in
lexicographic order.
requested_poles : 1-D ndarray
The poles the algorithm was asked to place sorted as above,
they may differ from what was achieved.
X : 2-D ndarray
The transfer matrix such that ``X * diag(poles) = (A - B*K)*X``
(see Notes)
rtol : float
The relative tolerance achieved on ``det(X)`` (see Notes).
`rtol` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
algorithms can't do anything, i.e. when ``B.shape[1] == 1``.
nb_iter : int
The number of iterations performed before converging.
`nb_iter` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
algorithms can't do anything, i.e. when ``B.shape[1] == 1``.
Notes
-----
The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
rank-2 updates. This yields on average more robust solutions (see [2]_
pp. 21-22); furthermore, the YT algorithm supports complex poles whereas KNV
does not in its original version. Only update method 0 proposed by KNV has
been implemented here, hence the name ``'KNV0'``.
KNV extended to complex poles is used in Matlab's ``place`` function, YT is
distributed under a non-free licence by Slicot under the name ``robpole``.
It is unclear and undocumented how KNV0 has been extended to complex poles
(Tits and Yang claim on page 14 of their paper that their method can not be
used to extend KNV to complex poles), therefore only YT supports them in
this implementation.
As the solution to the problem of pole placement is not unique for MIMO
systems, both methods start with a tentative transfer matrix which is
altered in various ways to increase its determinant. Both methods have been
proven to converge to a stable solution, however depending on the way the
initial transfer matrix is chosen they will converge to different
solutions and therefore there is absolutely no guarantee that using
``'KNV0'`` will yield results similar to Matlab's or any other
implementation of these algorithms.
Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
is only provided because it is needed by ``'YT'`` in some specific cases.
Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
when ``abs(det(X))`` is used as a robustness indicator.
[2]_ is available as a technical report on the following URL:
http://drum.lib.umd.edu/handle/1903/5598
References
----------
.. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
in linear state feedback", International Journal of Control, Vol. 41
pp. 1129-1155, 1985.
.. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
pole assignment by state feedback", IEEE Transactions on Automatic
Control, Vol. 41, pp. 1432-1452, 1996.
Examples
--------
A simple example demonstrating real pole placement using both KNV and YT
algorithms. This is example number 1 from section 4 of the reference KNV
publication ([1]_):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
... [-0.5814, -4.290, 0, 0.6750 ],
... [ 1.067, 4.273, -6.654, 5.893 ],
... [ 0.0480, 4.273, 1.343, -2.104 ]])
>>> B = np.array([[ 0, 5.679 ],
... [ 1.136, 1.136 ],
... [ 0, 0, ],
... [-3.146, 0 ]])
>>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
Now compute K with KNV method 0, with the default YT method and with the YT
method while forcing 100 iterations of the algorithm and print some results
after each call.
>>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
>>> fsf1.gain_matrix
array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
[ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
>>> fsf2 = signal.place_poles(A, B, P) # uses YT method
>>> fsf2.computed_poles
array([-8.6659, -5.0566, -0.5 , -0.2 ])
>>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
>>> fsf3.X
array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
[-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
[-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
[ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
The absolute value of the determinant of X is a good indicator to check the
robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
it. Below a comparison of the robustness of the results above:
>>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
True
>>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
True
Now a simple example for complex poles:
>>> A = np.array([[ 0, 7/3., 0, 0 ],
... [ 0, 0, 0, 7/9. ],
... [ 0, 0, 0, 0 ],
... [ 0, 0, 0, 0 ]])
>>> B = np.array([[ 0, 0 ],
... [ 0, 0 ],
... [ 1, 0 ],
... [ 0, 1 ]])
>>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
>>> fsf = signal.place_poles(A, B, P, method='YT')
We can plot the desired and computed poles in the complex plane:
>>> t = np.linspace(0, 2*np.pi, 401)
>>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle
>>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
... 'wo', label='Desired')
>>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
... label='Placed')
>>> plt.grid()
>>> plt.axis('image')
>>> plt.axis([-1.1, 1.1, -1.1, 1.1])
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
"""
# Input checking is kept out of this function (see _valid_inputs), as it only adds noise to the code
update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
# The current value of the relative tolerance we achieved
cur_rtol = 0
# The number of iterations needed before converging
nb_iter = 0
# Step A: QR decomposition of B page 1132 KN
# to debug with numpy qr uncomment the line below
# u, z = np.linalg.qr(B, mode="complete")
u, z = s_qr(B, mode="full")
rankB = np.linalg.matrix_rank(B)
u0 = u[:, :rankB]
u1 = u[:, rankB:]
z = z[:rankB, :]
# If we can use the identity matrix as X the solution is obvious
if B.shape[0] == rankB:
# if B is square and full rank there is only one solution
# such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0])
# i.e K=inv(B)*(diag(P)-A)
# if B has as many rows as its rank (but is not square) there are many
# solutions and we can choose one using least squares
# => use lstsq in both cases.
# In both cases the transfer matrix X will be eye(A.shape[0]) and I
# can hardly think of a better one so there is nothing to optimize
#
# for complex poles we use the following trick
#
# |a -b| has eigenvalues a+bi and a-bi
# |b a|
#
# |a+bi 0| has the obvious eigenvalues a+bi and a-bi
# |0 a-bi|
#
# e.g. solving the first one in R gives the solution
# for the second one in C
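# Illustrative note (added): for a requested pole p = -2+1j the loop
# below writes the 2x2 block [[-2, -1], [1, -2]] into diag_poles, and
# that block indeed has eigenvalues -2+1j and -2-1j.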
diag_poles = np.zeros(A.shape)
idx = 0
while idx < poles.shape[0]:
p = poles[idx]
diag_poles[idx, idx] = np.real(p)
if ~np.isreal(p):
diag_poles[idx, idx+1] = -np.imag(p)
diag_poles[idx+1, idx+1] = np.real(p)
diag_poles[idx+1, idx] = np.imag(p)
idx += 1 # skip next one
idx += 1
gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0]
transfer_matrix = np.eye(A.shape[0])
cur_rtol = np.nan
nb_iter = np.nan
else:
# step A (p1144 KNV) and beginning of step F: decompose
# dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
# in the same loop
ker_pole = []
# flag to skip the conjugate of a complex pole
skip_conjugate = False
# select orthonormal base ker_pole for each Pole and vectors for
# transfer_matrix
for j in range(B.shape[0]):
if skip_conjugate:
skip_conjugate = False
continue
pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
# after QR Q=Q0|Q1
# only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
# Q1 is orthogonal to Q0 and will be multiplied by the zeros in
# R when using mode "complete". In default mode Q1 and the zeros
# in R are not computed
# To debug with numpy qr uncomment the line below
# Q, _ = np.linalg.qr(pole_space_j, mode="complete")
Q, _ = s_qr(pole_space_j, mode="full")
ker_pole_j = Q[:, pole_space_j.shape[1]:]
# We want to select one vector in ker_pole_j to build the transfer
# matrix. However, qr sometimes returns vectors with zeros on the
# same row for each pole, and this yields very long convergence
# times. Other times it returns a set of vectors, one with zero
# imaginary part and one (or several) with non-zero imaginary
# parts. After trying many ways to select the best possible one
# (e.g. ditching vectors with zero imaginary part for complex
# poles), summing all vectors in ker_pole_j turned out to solve
# all the observed problems and is a valid choice for
# transfer_matrix. This way, for complex poles we are sure to have
# a non-zero imaginary part, and the problem of rows full of zeros
# in transfer_matrix is solved too: when a vector from ker_pole_j
# has a zero, the other one(s) (when ker_pole_j.shape[1] > 1) won't
# have a zero there.
transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
transfer_matrix_j = (transfer_matrix_j /
np.linalg.norm(transfer_matrix_j))
if ~np.isreal(poles[j]): # complex pole
transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
np.imag(transfer_matrix_j)])
ker_pole.extend([ker_pole_j, ker_pole_j])
# Skip next pole as it is the conjugate
skip_conjugate = True
else: # real pole, nothing to do
ker_pole.append(ker_pole_j)
if j == 0:
transfer_matrix = transfer_matrix_j
else:
transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
if rankB > 1: # otherwise there is nothing we can optimize
stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
poles, B, maxiter, rtol)
if not stop and rtol > 0:
# if rtol<=0 the user has probably done that on purpose,
# don't annoy him
err_msg = (
"Convergence was not reached after maxiter iterations.\n"
"You asked for a relative tolerance of %f we got %f" %
(rtol, cur_rtol)
)
warnings.warn(err_msg)
# reconstruct transfer_matrix to match complex conjugate pairs,
# ie transfer_matrix_j/transfer_matrix_j+1 are
# Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
transfer_matrix = transfer_matrix.astype(complex)
idx = 0
while idx < poles.shape[0]-1:
if ~np.isreal(poles[idx]):
rel = transfer_matrix[:, idx].copy()
img = transfer_matrix[:, idx+1]
# without the copy() above, rel would be a view referencing a
# column of transfer_matrix; it would change after the next line
# and the line after would then not yield the correct value
transfer_matrix[:, idx] = rel-1j*img
transfer_matrix[:, idx+1] = rel+1j*img
idx += 1 # skip next one
idx += 1
try:
m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
transfer_matrix.T)).T
gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
except np.linalg.LinAlgError:
raise ValueError("The poles you've chosen can't be placed. "
"Check the controllability matrix and try "
"another set of poles")
# Beware: Kautsky solves A+BK but the usual form is A-BK
gain_matrix = -gain_matrix
# K may still contain complex values with negligible (~0j) imaginary parts; keep only the real part
gain_matrix = np.real(gain_matrix)
full_state_feedback = Bunch()
full_state_feedback.gain_matrix = gain_matrix
full_state_feedback.computed_poles = _order_complex_poles(
np.linalg.eig(A - np.dot(B, gain_matrix))[0]
)
full_state_feedback.requested_poles = poles
full_state_feedback.X = transfer_matrix
full_state_feedback.rtol = cur_rtol
full_state_feedback.nb_iter = nb_iter
return full_state_feedback
def dlsim(system, u, t=None, x0=None):
"""
Simulate output of a discrete-time linear system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
u : array_like
An input array describing the input at each time `t` (interpolation is
assumed between given times). If there are multiple inputs, then each
column of the rank-2 array represents an input.
t : array_like, optional
The time steps at which the input is defined. If `t` is given, it
must be the same length as `u`, and the final value in `t` determines
the number of steps returned in the output.
x0 : array_like, optional
The initial conditions on the state vector (zero by default).
Returns
-------
tout : ndarray
Time values for the output, as a 1-D array.
yout : ndarray
System response, as a 1-D array.
xout : ndarray, optional
Time-evolution of the state-vector. Only generated if the input is a
`StateSpace` system.
See Also
--------
lsim, dstep, dimpulse, cont2discrete
Examples
--------
A simple integrator transfer function with a discrete time step of 1.0
could be implemented as:
>>> from scipy import signal
>>> tf = ([1.0,], [1.0, -1.0], 1.0)
>>> t_in = [0.0, 1.0, 2.0, 3.0]
>>> u = np.asarray([0.0, 0.0, 1.0, 1.0])
>>> t_out, y = signal.dlsim(tf, u, t=t_in)
>>> y.T
array([[ 0., 0., 0., 1.]])
"""
# Convert system to dlti-StateSpace
if isinstance(system, lti):
raise AttributeError('dlsim can only be used with discrete-time dlti '
'systems.')
elif not isinstance(system, dlti):
system = dlti(*system[:-1], dt=system[-1])
# Condition needed to ensure output remains compatible
is_ss_input = isinstance(system, StateSpace)
system = system._as_ss()
u = np.atleast_1d(u)
if u.ndim == 1:
u = np.atleast_2d(u).T
if t is None:
out_samples = len(u)
stoptime = (out_samples - 1) * system.dt
else:
stoptime = t[-1]
out_samples = int(np.floor(stoptime / system.dt)) + 1
# Pre-build output arrays
xout = np.zeros((out_samples, system.A.shape[0]))
yout = np.zeros((out_samples, system.C.shape[0]))
tout = np.linspace(0.0, stoptime, num=out_samples)
# Check initial condition
if x0 is None:
xout[0, :] = np.zeros((system.A.shape[1],))
else:
xout[0, :] = np.asarray(x0)
# Pre-interpolate inputs into the desired time steps
if t is None:
u_dt = u
else:
if len(u.shape) == 1:
u = u[:, np.newaxis]
u_dt_interp = interp1d(t, u.transpose(), copy=False, bounds_error=True)
u_dt = u_dt_interp(tout).transpose()
# Simulate the system
for i in range(0, out_samples - 1):
xout[i+1, :] = (np.dot(system.A, xout[i, :]) +
np.dot(system.B, u_dt[i, :]))
yout[i, :] = (np.dot(system.C, xout[i, :]) +
np.dot(system.D, u_dt[i, :]))
# Last point
yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) +
np.dot(system.D, u_dt[out_samples-1, :]))
if is_ss_input:
return tout, yout, xout
else:
return tout, yout
def dimpulse(system, x0=None, t=None, n=None):
"""
Impulse response of discrete-time system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
x0 : array_like, optional
Initial state-vector. Defaults to zero.
t : array_like, optional
Time points. Computed if not given.
n : int, optional
The number of time points to compute (if `t` is not given).
Returns
-------
tout : ndarray
Time values for the output, as a 1-D array.
yout : ndarray
Impulse response of system. Each element of the tuple represents
the output of the system based on an impulse in each input.
See Also
--------
impulse, dstep, dlsim, cont2discrete
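Examples
--------
A minimal sketch (assumed usage, reusing the integrator transfer
function from the `dlsim` example, with a discrete time step of 1.0):
>>> from scipy import signal
>>> tf = ([1.0,], [1.0, -1.0], 1.0)
>>> t, y = signal.dimpulse(tf, n=5)
>>> y[0].T
array([[ 0., 1., 1., 1., 1.]])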
"""
# Convert system to dlti-StateSpace
if isinstance(system, dlti):
system = system._as_ss()
elif isinstance(system, lti):
raise AttributeError('dimpulse can only be used with discrete-time '
'dlti systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_ss()
# Default to 100 samples if unspecified
if n is None:
n = 100
# If time is not specified, use the number of samples
# and system dt
if t is None:
t = np.linspace(0, n * system.dt, n, endpoint=False)
else:
t = np.asarray(t)
# For each input, implement a step change
yout = None
for i in range(0, system.inputs):
u = np.zeros((t.shape[0], system.inputs))
u[0, i] = 1.0
one_output = dlsim(system, u, t=t, x0=x0)
if yout is None:
yout = (one_output[1],)
else:
yout = yout + (one_output[1],)
tout = one_output[0]
return tout, yout
def dstep(system, x0=None, t=None, n=None):
"""
Step response of discrete-time system.
Parameters
----------
system : tuple of array_like
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
x0 : array_like, optional
Initial state-vector. Defaults to zero.
t : array_like, optional
Time points. Computed if not given.
n : int, optional
The number of time points to compute (if `t` is not given).
Returns
-------
tout : ndarray
Output time points, as a 1-D array.
yout : ndarray
Step response of system. Each element of the tuple represents
the output of the system based on a step response to each input.
See Also
--------
step, dimpulse, dlsim, cont2discrete
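Examples
--------
A minimal sketch (assumed usage, same integrator transfer function as
in the `dlsim` example, with a discrete time step of 1.0):
>>> from scipy import signal
>>> tf = ([1.0,], [1.0, -1.0], 1.0)
>>> t, y = signal.dstep(tf, n=5)
>>> y[0].T
array([[ 0., 1., 2., 3., 4.]])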
"""
# Convert system to dlti-StateSpace
if isinstance(system, dlti):
system = system._as_ss()
elif isinstance(system, lti):
raise AttributeError('dstep can only be used with discrete-time dlti '
'systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_ss()
# Default to 100 samples if unspecified
if n is None:
n = 100
# If time is not specified, use the number of samples
# and system dt
if t is None:
t = np.linspace(0, n * system.dt, n, endpoint=False)
else:
t = np.asarray(t)
# For each input, implement a step change
yout = None
for i in range(0, system.inputs):
u = np.zeros((t.shape[0], system.inputs))
u[:, i] = np.ones((t.shape[0],))
one_output = dlsim(system, u, t=t, x0=x0)
if yout is None:
yout = (one_output[1],)
else:
yout = yout + (one_output[1],)
tout = one_output[0]
return tout, yout
def dfreqresp(system, w=None, n=10000, whole=False):
"""
Calculate the frequency response of a discrete-time system.
Parameters
----------
system : an instance of the `dlti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `dlti`)
* 3 (numerator, denominator, dt)
* 4 (zeros, poles, gain, dt)
* 5 (A, B, C, D, dt)
w : array_like, optional
Array of frequencies (in radians/sample). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
whole : bool, optional
Normally, if 'w' is not given, frequencies are computed from 0 to the
Nyquist frequency, pi radians/sample (upper-half of unit-circle). If
`whole` is True, compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : 1D ndarray
Frequency array [radians/sample]
H : 1D ndarray
Array of complex magnitude values
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.18.0
Examples
--------
Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(z) = 1 / (z^2 + 2z + 3)
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
>>> w, H = signal.dfreqresp(sys)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if not isinstance(system, dlti):
if isinstance(system, lti):
raise AttributeError('dfreqresp can only be used with '
'discrete-time systems.')
system = dlti(*system[:-1], dt=system[-1])
if isinstance(system, StateSpace):
# No SS->ZPK code exists right now, just SS->TF->ZPK
system = system._as_tf()
if not isinstance(system, (TransferFunction, ZerosPolesGain)):
raise ValueError('Unknown system type')
if system.inputs != 1 or system.outputs != 1:
raise ValueError("dfreqresp requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
if isinstance(system, TransferFunction):
# Convert numerator and denominator from polynomials in the variable
# 'z' to polynomials in the variable 'z^-1', as freqz expects.
num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
w, h = freqz(num, den, worN=worN, whole=whole)
elif isinstance(system, ZerosPolesGain):
w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN,
whole=whole)
return w, h
def dbode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a discrete-time system.
Parameters
----------
system : an instance of the `dlti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `dlti`)
* 3 (num, den, dt)
* 4 (zeros, poles, gain, dt)
* 5 (A, B, C, D, dt)
w : array_like, optional
Array of frequencies (in radians/sample). Magnitude and phase data is
calculated for every value in this array. If not given a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/time_unit]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Transfer function: H(z) = 1 / (z^2 + 2z + 3)
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
Equivalent: sys.bode()
>>> w, mag, phase = signal.dbode(sys)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = dfreqresp(system, w=w, n=n)
if isinstance(system, dlti):
dt = system.dt
else:
dt = system[-1]
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y)))
return w / dt, mag, phase
| mit |
lordkman/burnman | burnman/output_seismo.py | 3 | 11627 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
import numpy as np
import warnings
import scipy.integrate
import matplotlib.pyplot as plt
import pkgutil
from . import tools
from . import constants
from . import seismic
from . import geotherm
def write_axisem_input(rock, min_depth=670.e3, max_depth=2890.e3, T0= 1900, filename='axisem_burnmantestrock.txt',
axisem_ref='axisem_prem_ani_noocean.txt', plotting=False):
"""
Writing velocities and densities to AXISEM (www.axisem.info) input file
Default is set to replace the lower mantle with the BurnMan rock.
Note:
- This implementation uses PREM to convert depths to the pressures at which to compute
- This implementation assumes an adiabatic temperature profile; only T0 at min_depth can be set
- Currently, it only honors the discontinuities already in the synthetic input file, so it is best
to only replace certain layers with burnman values (this should be improved in the future).
Parameters
----------
rock : burnman.Composite()
Composition to implement in the model
min_depth : float
minimum depth to replace model (m) (default = 670 km)
max_depth : float
maximum depth to replace model (m) (default = 2890 km)
T0 : float
Anchor temperature at min_depth for adiabatic profile (K) (default=1900)
filename: string
Output filename (default ='axisem_burnmantestrock.txt')
axisem_ref: string
Input filename (in burnman/data/input_seismic/) (default = 'axisem_prem_ani_noocean.txt')
plotting: Boolean
True means plot of the old model and replaced model will be shown (default = False)
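Example
-------
A minimal sketch of the intended call (illustrative only; the rock
composition below is an assumed placeholder, not something this module
prescribes):
rock = burnman.Composite([burnman.minerals.SLB_2011.mg_perovskite()], [1.0])
write_axisem_input(rock, min_depth=670.e3, max_depth=2890.e3, T0=1900,
plotting=True)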
"""
# Load reference input
datastream = pkgutil.get_data('burnman', 'data/input_seismic/' + axisem_ref)
lines = [line.strip()
for line in datastream.decode('ascii').split('\n') if line.strip()]
table = []
for line in lines[18:]:
numbers = np.fromstring(line, sep=' ')
if len(numbers)>0:
if line[0] != "#" and line[0] != "%":
table.append(numbers)
table = np.array(table)
ref_radius = table[:, 0]
ref_depth = 6371.e3 - ref_radius
ref_density = table[:, 1]
ref_vpv = table[:, 2]
ref_vsv = table[:, 3]
ref_Qk = table[:, 4]
ref_Qmu = table[:, 5]
ref_vph = table[:, 6]
ref_vsh = table[:, 7]
ref_eta = table[:, 8]
# Cutting out range to input in Axisem reference file (currently the lower mantle)
indrange = [x for x in range(len(ref_depth)) if ref_depth[
x] > min_depth and ref_depth[x] < max_depth]
# pad both ends to include up to discontinuity, bit of a hack...
indrange.insert(0, indrange[0] - 1)
indrange.append(indrange[-1] + 1)
# Invert depthrange so adiabatic computations work!
depthrange = ref_depth[indrange]
# convert depths to pressures
pressures = seismic.PREM().pressure(depthrange)
# Computing adiabatic temperatures. T0 is an input parameter!
T0 = T0 # K
temperatures = geotherm.adiabatic(pressures, T0, rock)
print("Calculations are done for:")
rock.debug_print()
rock_vp, rock_vs, rock_rho = rock.evaluate(
['v_p', 'v_s', 'density'], pressures, temperatures)
discontinuity =0
# WRITE OUT FILE
f = open(filename, 'w')
print('Writing ' + filename + ' ...')
f.write('# Input file '+ filename +' for AXISEM created using BurnMan, replacing ' + axisem_ref+ ' between ' +str(np.round(min_depth/1.e3)) + ' and ' + str(np.round(max_depth /1.e3)) +' km \n')
f.write('NAME ' + filename + '\n')
for line in lines[2:18]:
f.write(line[:-1] + '\n')
for i in range(indrange[0]):
if i>0 and ref_radius[i] ==ref_radius[i-1]:
discontinuity = discontinuity + 1
f.write('# Discontinuity ' +str(discontinuity) + ', depth: '+ str(np.round(ref_depth[i]/1.e3,decimals=2)) +' km \n')
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
for i in range(indrange[0], indrange[-1]):
ind2 = -1 + i - indrange[0]
if ref_radius[i] ==ref_radius[i-1]:
discontinuity = discontinuity + 1
f.write('# Discontinuity '+ str(discontinuity) + ', depth: '+ str(np.round(ref_depth[i]/1.e3,decimals=2))+' km \n')
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], rock_rho[ind2], rock_vp[ind2], rock_vs[ind2], ref_Qk[i],
ref_Qmu[i], rock_vp[ind2], rock_vs[ind2], ref_eta[i]))
for i in range(indrange[-1], len(ref_radius)):
if ref_radius[i] ==ref_radius[i-1]:
discontinuity = discontinuity + 1
f.write('# Discontinuity ' +str(discontinuity) + ', depth: '+ str(np.round(ref_depth[i]/1.e3,decimals=2))+' km \n')
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
f.close()
if plotting:
# plot vp
plt.plot(ref_depth / 1.e3, ref_vph / 1.e3, color='g', linestyle='-', label='vp')
plt.plot(depthrange / 1.e3, rock_vp / 1.e3, color='g', linestyle='-',
marker='o', markerfacecolor='g', markersize=1)
# plot Vs
plt.plot(ref_depth / 1.e3, ref_vsh / 1.e3, color='b', linestyle='-', label='vs')
plt.plot(depthrange / 1.e3, rock_vs / 1.e3, color='b', linestyle='-',
marker='o', markerfacecolor='b', markersize=1)
# plot density
plt.plot(ref_depth / 1.e3, ref_density / 1.e3, color='r', linestyle='-', label='density')
plt.plot(depthrange / 1.e3, rock_rho / 1.e3, color='r', linestyle='-',
marker='o', markerfacecolor='r', markersize=1)
plt.title(filename + ' = ' + axisem_ref + ' replaced between ' +
str(min_depth / 1.e3) + ' and ' + str(max_depth / 1.e3) + ' km')
plt.legend(loc='lower right')
plt.show()
def write_mineos_input(rock, min_depth=670.e3, max_depth=2890.e3, T0 = 1900, filename='mineos_burnmantestrock.txt',
mineos_ref='mineos_prem_noocean.txt', plotting=False):
"""
Writing velocities and densities to Mineos (https://geodynamics.org/cig/software/mineos/) input file
Default is set to replace the lower mantle with the BurnMan rock.
Note:
- This implementation uses PREM to convert depths to the pressures at which to compute
- This implementation assumes an adiabatic temperature profile; only T0 at min_depth can be set
- Currently, it only honors the discontinuities already in the synthetic input file, so it is best
to only replace certain layers with burnman values (this should be improved in the future).
Parameters
----------
rock : burnman.Composite()
Composition to implement in the model
min_depth : float
minimum depth to replace model (m) (default = 670 km)
max_depth : float
maximum depth to replace model (m) (default = 2890 km)
T0 : float
Anchor temperature at min_depth for adiabatic profile (K) (default=1900)
filename: string
Output filename (default ='mineos_burnmantestrock.txt')
mineos_ref: string
Input filename (in burnman/data/input_seismic/) (default = 'mineos_prem_noocean.txt')
plotting: Boolean
True means plot of the old model and replaced model will be shown (default = False)
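Example
-------
A minimal sketch of the intended call (illustrative only; the rock
composition is an assumed placeholder, as for `write_axisem_input`):
rock = burnman.Composite([burnman.minerals.SLB_2011.mg_perovskite()], [1.0])
write_mineos_input(rock, min_depth=670.e3, max_depth=2890.e3, T0=1900)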
"""
# Load reference input
datastream = pkgutil.get_data('burnman', 'data/input_seismic/' + mineos_ref)
lines = [line.strip()
for line in datastream.decode('ascii').split('\n') if line.strip()]
table=[]
for line in lines[3:]:
numbers = np.fromstring(line, sep=' ')
table.append(numbers)
table = np.array(table)
ref_radius = table[:, 0]
ref_depth = 6371.e3 - ref_radius
ref_density = table[:, 1]
ref_vpv = table[:, 2]
ref_vsv = table[:, 3]
ref_Qk = table[:, 4]
ref_Qmu = table[:, 5]
ref_vph = table[:, 6]
ref_vsh = table[:, 7]
ref_eta = table[:, 8]
# Cutting out range to input in Mineos (currently the lower mantle)
indrange = [x for x in range(len(ref_depth)) if ref_depth[
x] > min_depth and ref_depth[x] < max_depth]
# pad both ends to include up to discontinuity, bit of a hack...
indrange.insert(0, indrange[0] - 1)
indrange.append(indrange[-1] + 1)
# Invert depthrange so adiabatic computations work!
depthrange = ref_depth[indrange][::-1]
# convert depths to pressures
pressures = seismic.PREM().pressure(depthrange)
# Computing adiabatic temperatures. T0 is a choice!
T0 = T0 # K
temperatures = geotherm.adiabatic(pressures, T0, rock)
print("Calculations are done for:")
rock.debug_print()
rock_vp, rock_vs, rock_rho = rock.evaluate(
['v_p', 'v_s', 'density'], pressures, temperatures)
# WRITE OUT FILE
f = open(filename , 'w')
print('Writing ' + filename + ' ...')
f.write(lines[0][:-2] + ' + ' + filename + '\n')
for line in lines[1:3]:
f.write(line[:-2] + '\n')
for i in range(indrange[0]):
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
for i in range(indrange[0], indrange[-1]):
ind2 = -1 - i + indrange[0]
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], rock_rho[ind2], rock_vp[ind2], rock_vs[ind2], ref_Qk[i],
ref_Qmu[i], rock_vp[ind2], rock_vs[ind2], ref_eta[i]))
for i in range(indrange[-1], len(ref_radius)):
f.write(
'%8.0f %9.2f %9.2f %9.2f %9.1f %9.1f %9.2f %9.2f %9.5f \n' %
(ref_radius[i], ref_density[i], ref_vpv[i], ref_vsv[i], ref_Qk[i],
ref_Qmu[i], ref_vph[i], ref_vsh[i], ref_eta[i]))
f.close()
if plotting:
# plot vp
plt.plot(ref_depth / 1.e3, ref_vph / 1.e3, color='g', linestyle='-', label='vp')
plt.plot(depthrange / 1.e3, rock_vp / 1.e3, color='g', linestyle='-',
marker='o', markerfacecolor='g', markersize=1)
# plot Vs
plt.plot(ref_depth / 1.e3, ref_vsh / 1.e3, color='b', linestyle='-', label='vs')
plt.plot(depthrange / 1.e3, rock_vs / 1.e3, color='b', linestyle='-',
marker='o', markerfacecolor='b', markersize=1)
# plot density
plt.plot(ref_depth / 1.e3, ref_density / 1.e3, color='r', linestyle='-', label='density')
plt.plot(depthrange / 1.e3, rock_rho / 1.e3, color='r', linestyle='-',
marker='o', markerfacecolor='r', markersize=1)
plt.title(filename + ' = ' + mineos_ref + ' replaced between ' +
str(min_depth / 1.e3) + ' and ' + str(max_depth / 1.e3) + ' km')
plt.legend(loc='lower right')
plt.show()
| gpl-2.0 |
HeraclesHX/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
rosswhitfield/mantid | scripts/Engineering/gui/engineering_diffraction/tabs/focus/model.py | 3 | 17365 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import csv
from os import path, makedirs
from matplotlib import gridspec
import matplotlib.pyplot as plt
from Engineering.gui.engineering_diffraction.tabs.common import vanadium_corrections, path_handling
from Engineering.gui.engineering_diffraction.settings.settings_helper import get_setting
from Engineering.EnggUtils import create_custom_grouping_workspace
from mantid.simpleapi import logger, AnalysisDataService as Ads, SaveNexus, SaveGSS, SaveFocusedXYE, \
Load, NormaliseByCurrent, Divide, DiffractionFocussing, RebinToWorkspace, DeleteWorkspace, ApplyDiffCal, \
ConvertUnits, ReplaceSpecialValues
SAMPLE_RUN_WORKSPACE_NAME = "engggui_focusing_input_ws"
FOCUSED_OUTPUT_WORKSPACE_NAME = "engggui_focusing_output_ws_bank_"
CALIB_PARAMS_WORKSPACE_NAME = "engggui_calibration_banks_parameters"
NORTH_BANK_CAL = "EnginX_NorthBank.cal"
SOUTH_BANK_CAL = "EnginX_SouthBank.cal"
class FocusModel(object):
def __init__(self):
self._last_path = None
self._last_path_ws = None
def get_last_path(self):
return self._last_path
def focus_run(self, sample_paths, banks, plot_output, instrument, rb_num, spectrum_numbers, custom_cal):
"""
Focus some data using the current calibration.
:param sample_paths: The paths to the data to be focused.
:param banks: The banks that should be focused.
:param plot_output: True if the output should be plotted.
:param instrument: The instrument that the data came from.
:param rb_num: The experiment number, used to create directories. Can be None
:param spectrum_numbers: The specific spectra that should be focused. Used instead of banks.
:param custom_cal: User defined calibration file to crop the focus to
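Example (illustrative sketch only; the run file name is an assumed
placeholder and the required calibration workspaces must already exist):
FocusModel().focus_run(["ENGINX_305761.nxs"], banks=["1", "2"],
plot_output=False, instrument="ENGINX", rb_num=None,
spectrum_numbers=None, custom_cal=None)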
"""
full_calib_path = get_setting(path_handling.INTERFACES_SETTINGS_GROUP,
path_handling.ENGINEERING_PREFIX, "full_calibration")
if not Ads.doesExist("full_inst_calib"):
try:
full_calib_workspace = Load(full_calib_path, OutputWorkspace="full_inst_calib")
except RuntimeError:
logger.error("Error loading Full instrument calibration - this is set in the interface settings.")
return
else:
full_calib_workspace = Ads.retrieve("full_inst_calib")
if not Ads.doesExist(vanadium_corrections.INTEGRATED_WORKSPACE_NAME) and not Ads.doesExist(
vanadium_corrections.CURVES_WORKSPACE_NAME):
return
integration_workspace = Ads.retrieve(vanadium_corrections.INTEGRATED_WORKSPACE_NAME)
curves_workspace = Ads.retrieve(vanadium_corrections.CURVES_WORKSPACE_NAME)
output_workspaces = [] # List of collated workspaces to plot.
df_kwarg, name, region_calib = None, None, None
if spectrum_numbers:
inst_ws = path_handling.load_workspace(sample_paths[0])
grp_ws = create_custom_grouping_workspace(spectrum_numbers, inst_ws)
df_kwarg = {"GroupingWorkspace": grp_ws}
region_calib = "engggui_calibration_Cropped"
name = 'Cropped'
elif custom_cal:
# TODO this functionality has not yet been fully implemented
df_kwarg = {"GroupingFileName": custom_cal}
region_calib = "engggui_calibration_Custom"
name = 'Custom'
if df_kwarg:
# check correct region calibration exists
if not Ads.doesExist(region_calib):
logger.warning(f"Cannot focus as the region calibration workspace \"{region_calib}\" is not "
f"present.")
return
for sample_path in sample_paths:
sample_workspace = path_handling.load_workspace(sample_path)
run_no = path_handling.get_run_number_from_path(sample_path, instrument)
tof_output_name = str(run_no) + "_" + FOCUSED_OUTPUT_WORKSPACE_NAME + name
dspacing_output_name = tof_output_name + "_dSpacing"
# perform prefocus operations on whole instrument workspace
prefocus_success = self._whole_inst_prefocus(sample_workspace, integration_workspace,
full_calib_workspace)
if not prefocus_success:
continue
# perform focus over chosen region of interest
self._run_focus(sample_workspace, tof_output_name, curves_workspace, df_kwarg, region_calib)
output_workspaces.append([tof_output_name])
self._save_output(instrument, sample_path, "Cropped", tof_output_name, rb_num)
self._save_output(instrument, sample_path, "Cropped", dspacing_output_name, rb_num, unit="dSpacing")
self._output_sample_logs(instrument, run_no, sample_workspace, rb_num)
# remove created grouping workspace if present
if Ads.doesExist("grp_ws"):
DeleteWorkspace("grp_ws")
else:
for sample_path in sample_paths:
sample_workspace = path_handling.load_workspace(sample_path)
run_no = path_handling.get_run_number_from_path(sample_path, instrument)
workspaces_for_run = []
# perform prefocus operations on whole instrument workspace
prefocus_success = self._whole_inst_prefocus(sample_workspace, integration_workspace,
full_calib_workspace)
if not prefocus_success:
continue
# perform focus over chosen banks
for name in banks:
tof_output_name = str(run_no) + "_" + FOCUSED_OUTPUT_WORKSPACE_NAME + str(name)
dspacing_output_name = tof_output_name + "_dSpacing"
if name == '1':
df_kwarg = {"GroupingFileName": NORTH_BANK_CAL}
region_calib = "engggui_calibration_bank_1"
else:
df_kwarg = {"GroupingFileName": SOUTH_BANK_CAL}
region_calib = "engggui_calibration_bank_2"
# check correct region calibration exists
if not Ads.doesExist(region_calib):
logger.warning(f"Cannot focus as the region calibration workspace \"{region_calib}\" is not "
f"present.")
return
self._run_focus(sample_workspace, tof_output_name, curves_workspace, df_kwarg, region_calib)
workspaces_for_run.append(tof_output_name)
# Save the output to the file system.
self._save_output(instrument, sample_path, name, tof_output_name, rb_num)
self._save_output(instrument, sample_path, name, dspacing_output_name, rb_num, unit="dSpacing")
output_workspaces.append(workspaces_for_run)
self._output_sample_logs(instrument, run_no, sample_workspace, rb_num)
DeleteWorkspace(sample_workspace)
# Plot the output
if plot_output:
for ws_names in output_workspaces:
self._plot_focused_workspaces(ws_names)
@staticmethod
def _whole_inst_prefocus(input_workspace,
vanadium_integration_ws,
full_calib) -> bool:
"""This is used to perform the operations done on the whole instrument workspace, before the chosen region of
interest is focused using _run_focus
:param input_workspace: Raw sample run to process prior to focussing over a region of interest
:param vanadium_integration_ws: Integral of the supplied vanadium run
:param full_calib: Full instrument calibration workspace (table ws output from PDCalibration)
:return True if successful, False if aborted
"""
if input_workspace.getRun().getProtonCharge() > 0:
NormaliseByCurrent(InputWorkspace=input_workspace, OutputWorkspace=input_workspace)
else:
logger.warning(f"Skipping focus of run {input_workspace.name()} because it has invalid proton charge.")
return False
input_workspace /= vanadium_integration_ws
# replace nans created in sensitivity correction
ReplaceSpecialValues(InputWorkspace=input_workspace, OutputWorkspace=input_workspace, NaNValue=0,
InfinityValue=0)
ApplyDiffCal(InstrumentWorkspace=input_workspace, CalibrationWorkspace=full_calib)
ConvertUnits(InputWorkspace=input_workspace, OutputWorkspace=input_workspace, Target='dSpacing')
return True
@staticmethod
def _run_focus(input_workspace,
tof_output_name,
vanadium_curves_ws,
df_kwarg,
region_calib) -> None:
"""Focus the processed full instrument workspace over the chosen region of interest
:param input_workspace: Processed full instrument workspace converted to dSpacing
:param tof_output_name: Name for the time-of-flight output workspace
:param vanadium_curves_ws: Workspace containing the vanadium curves
:param df_kwarg: kwarg to pass to DiffractionFocussing specifying the region of interest
:param region_calib: Region of interest calibration workspace (table ws output from PDCalibration)
"""
# rename workspace prior to focussing to avoid errors later
dspacing_output_name = tof_output_name + "_dSpacing"
# focus over specified region of interest
focused_sample = DiffractionFocussing(InputWorkspace=input_workspace, OutputWorkspace=dspacing_output_name,
**df_kwarg)
curves_rebinned = RebinToWorkspace(WorkspaceToRebin=vanadium_curves_ws, WorkspaceToMatch=focused_sample)
Divide(LHSWorkspace=focused_sample, RHSWorkspace=curves_rebinned, OutputWorkspace=focused_sample,
AllowDifferentNumberSpectra=True)
# apply calibration from specified region of interest
ApplyDiffCal(InstrumentWorkspace=focused_sample, CalibrationWorkspace=region_calib)
# set bankid for use in fit tab
run = focused_sample.getRun()
if region_calib == "engggui_calibration_bank_1":
run.addProperty("bankid", 1, True)
elif region_calib == "engggui_calibration_bank_2":
run.addProperty("bankid", 2, True)
else:
run.addProperty("bankid", 3, True)
# output in both dSpacing and TOF
ConvertUnits(InputWorkspace=focused_sample, OutputWorkspace=tof_output_name, Target='TOF')
DeleteWorkspace(curves_rebinned)
@staticmethod
def _plot_focused_workspaces(focused_workspaces):
fig = plt.figure()
gs = gridspec.GridSpec(1, len(focused_workspaces))
plots = [
fig.add_subplot(gs[i], projection="mantid") for i in range(len(focused_workspaces))
]
for ax, ws_name in zip(plots, focused_workspaces):
ax.plot(Ads.retrieve(ws_name), wkspIndex=0)
ax.set_title(ws_name)
fig.show()
def _save_output(self, instrument, sample_path, bank, sample_workspace, rb_num, unit="TOF"):
"""
Save a focused workspace to the file system. Saves separate copies to a User directory if an rb number has
been set.
:param instrument: The instrument the data is from.
:param sample_path: The path to the data file that was focused.
:param bank: The name of the bank being saved.
:param sample_workspace: The name of the workspace to be saved.
:param rb_num: Usually an experiment id, defines the name of the user directory.
"""
self._save_focused_output_files_as_nexus(instrument, sample_path, bank, sample_workspace,
rb_num, unit)
self._save_focused_output_files_as_gss(instrument, sample_path, bank, sample_workspace,
rb_num, unit)
self._save_focused_output_files_as_topas_xye(instrument, sample_path, bank, sample_workspace,
rb_num, unit)
output_path = path.join(path_handling.get_output_path(), 'Focus')
logger.notice(f"\n\nFocus files saved to: \"{output_path}\"\n\n")
if rb_num:
output_path = path.join(path_handling.get_output_path(), 'User', rb_num, 'Focus')
logger.notice(f"\n\nFocus files also saved to: \"{output_path}\"\n\n")
self._last_path = output_path
if self._last_path and self._last_path_ws:
self._last_path = path.join(self._last_path, self._last_path_ws)
def _save_focused_output_files_as_gss(self, instrument, sample_path, bank, sample_workspace,
rb_num, unit):
gss_output_path = path.join(
path_handling.get_output_path(), "Focus",
self._generate_output_file_name(instrument, sample_path, bank, unit, ".gss"))
SaveGSS(InputWorkspace=sample_workspace, Filename=gss_output_path)
if rb_num:
gss_output_path = path.join(
path_handling.get_output_path(), "User", rb_num, "Focus",
self._generate_output_file_name(instrument, sample_path, bank, unit, ".gss"))
SaveGSS(InputWorkspace=sample_workspace, Filename=gss_output_path)
def _save_focused_output_files_as_nexus(self, instrument, sample_path, bank, sample_workspace,
rb_num, unit):
file_name = self._generate_output_file_name(instrument, sample_path, bank, unit, ".nxs")
nexus_output_path = path.join(path_handling.get_output_path(), "Focus", file_name)
SaveNexus(InputWorkspace=sample_workspace, Filename=nexus_output_path)
if rb_num:
nexus_output_path = path.join(
path_handling.get_output_path(), "User", rb_num, "Focus", file_name)
SaveNexus(InputWorkspace=sample_workspace, Filename=nexus_output_path)
self._last_path_ws = file_name
def _save_focused_output_files_as_topas_xye(self, instrument, sample_path, bank,
sample_workspace, rb_num, unit):
xye_output_path = path.join(
path_handling.get_output_path(), "Focus",
self._generate_output_file_name(instrument, sample_path, bank, unit, ".abc"))
SaveFocusedXYE(InputWorkspace=sample_workspace,
Filename=xye_output_path,
SplitFiles=False,
Format="TOPAS")
if rb_num:
xye_output_path = path.join(
path_handling.get_output_path(), "User", rb_num, "Focus",
self._generate_output_file_name(instrument, sample_path, bank, unit, ".abc"))
SaveFocusedXYE(InputWorkspace=sample_workspace,
Filename=xye_output_path,
SplitFiles=False,
Format="TOPAS")
@staticmethod
def _output_sample_logs(instrument, run_number, workspace, rb_num):
def write_to_file():
with open(output_path, "w", newline="") as logfile:
writer = csv.writer(logfile, ["Sample Log", "Avg Value"])
for log in output_dict:
writer.writerow([log, output_dict[log]])
output_dict = {}
sample_run = workspace.getRun()
log_names = sample_run.keys()
# Collect numerical sample logs.
for name in log_names:
try:
output_dict[name] = sample_run.getPropertyAsSingleValue(name)
except ValueError:
logger.information(f"Could not convert {name} to a numerical value. It will not be included in the "
f"sample logs output file.")
focus_dir = path.join(path_handling.get_output_path(), "Focus")
if not path.exists(focus_dir):
makedirs(focus_dir)
output_path = path.join(focus_dir, (instrument + "_" + run_number + "_sample_logs.csv"))
write_to_file()
if rb_num:
focus_user_dir = path.join(path_handling.get_output_path(), "User", rb_num, "Focus")
if not path.exists(focus_user_dir):
makedirs(focus_user_dir)
output_path = path.join(focus_user_dir, (instrument + "_" + run_number + "_sample_logs.csv"))
write_to_file()
@staticmethod
def _generate_output_file_name(instrument, sample_path, bank, unit, suffix):
run_no = path_handling.get_run_number_from_path(sample_path, instrument)
return instrument + '_' + run_no + '_' + "bank_" + bank + '_' + unit + suffix
| gpl-3.0 |
davidgbe/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
fzalkow/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities: some are over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
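# Empirical curve: sort the test samples by their uncalibrated predicted
# probability, split them into 25 equal-sized bins and plot each bin's mean
# outcome at the bin's midpoint along the instance axis.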
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
potash/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
sonnyhu/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# Generate the training data
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the least-squares prediction and the 90% prediction
# interval given by the 5% and 95% quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=r'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
shikhardb/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
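    # The boundary for class c is the set of points (x0, x1) where
    # coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] == 0; ``line`` solves
    # this equation for x1 given x0.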
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
mojoboss/scikit-learn | sklearn/utils/tests/test_random.py | 230 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
    # the lengths of the arrays in classes and class_probabilites are mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
krez13/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never
more than 2 and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
MDAnalysis/pmda | pmda/rms/rmsf.py | 1 | 8614 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# PMDA
# Copyright (c) 2019 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
"""
Calculating Root-Mean-Square Fluctuations (RMSF) --- :mod:`pmda.rmsf`
=====================================================================
This module contains parallel versions of analysis tasks in
:mod:`MDAnalysis.analysis.rms`.
.. autoclass:: RMSF
:members:
:inherited-members:
See Also
--------
MDAnalysis.analysis.rms.RMSF
"""
from __future__ import absolute_import, division
import numpy as np
from pmda.parallel import ParallelAnalysisBase
from pmda.util import fold_second_order_moments
class RMSF(ParallelAnalysisBase):
r"""Parallel RMSF analysis.
Calculates RMSF of given atoms across a trajectory.
Attributes
----------
rmsf : array
``N``-length :class:`numpy.ndarray` array of RMSF values,
where ``N`` is the number of atoms in the `atomgroup` of
interest. Returned values have units of ångströms.
Parameters
----------
atomgroup : AtomGroup
Atoms for which RMSF is calculated
Raises
------
ValueError
raised if negative values are calculated, which indicates that a
        numerical overflow or underflow occurred
See Also
--------
MDAnalysis.analysis.rms.RMSF
Notes
-----
No RMSD-superposition is performed; it is assumed that the user is
providing a trajectory where the protein of interest has been structurally
aligned to a reference structure (see the Examples section below). The
protein also has be whole because periodic boundaries are not taken into
account.
Run the analysis with :meth:`RMSF.run`, which stores the results
in the array :attr:`RMSF.rmsf`.
The root mean square fluctuation of an atom :math:`i` is computed as the
time average:
.. math::
\sigma_{i} = \sqrt{\left\langle (\mathbf{x}_{i} -
\langle\mathbf{x}_{i}\rangle)^2
\right\rangle}
No mass weighting is performed.
This method implements an algorithm for computing sums of squares while
avoiding overflows and underflows [Welford1962]_, as well as an algorithm
for combining the sum of squares and means of separate partitions of a
given trajectory to calculate the RMSF of the entire trajectory
[CGL1979]_.
For more details about RMSF calculations, refer to [Awtrey2019]_.
References
----------
.. [Welford1962] B. P. Welford (1962). "Note on a Method for
Calculating Corrected Sums of Squares and Products." Technometrics
4(3):419-420.
Examples
--------
In this example we calculate the residue RMSF fluctuations by analyzing
the :math:`\text{C}_\alpha` atoms. First we need to fit the trajectory
to the average structure as a reference. That requires calculating the
average structure first. Because we need to analyze and manipulate the
same trajectory multiple times, we are going to load it into memory
using the :mod:`~MDAnalysis.coordinates.MemoryReader`. (If your
trajectory does not fit into memory, you will need to :ref:`write out
intermediate trajectories <writing-trajectories>` to disk or
:ref:`generate an in-memory universe
<creating-in-memory-trajectory-label>` that only contains, say, the
protein)::
import MDAnalysis as mda
from MDAnalysis.analysis import align
from MDAnalysis.tests.datafiles import TPR, XTC
u = mda.Universe(TPR, XTC, in_memory=True)
protein = u.select_atoms("protein")
# TODO: Need to center and make whole (this test trajectory
# contains the protein being split across periodic boundaries
# and the results will be WRONG!)
# Fit to the initial frame to get a better average structure
# (the trajectory is changed in memory)
prealigner = align.AlignTraj(u, u, select="protein and name CA",
in_memory=True).run()
# ref = average structure
ref_coordinates = u.trajectory.timeseries(asel=protein).mean(axis=1)
# Make a reference structure (need to reshape into a
# 1-frame "trajectory").
ref = mda.Merge(protein).load_new(ref_coordinates[:, None, :],
order="afc")
We created a new universe ``reference`` that contains a single frame
with the averaged coordinates of the protein. Now we need to fit the
whole trajectory to the reference by minimizing the RMSD. We use
:class:`MDAnalysis.analysis.align.AlignTraj`::
aligner = align.AlignTraj(u, ref, select="protein and name CA",
in_memory=True).run()
# need to write the trajectory to disk for PMDA 0.3.0 (see issue #15)
with mda.Writer("rmsfit.xtc", n_atoms=u.atoms.n_atoms) as W:
for ts in u.trajectory:
W.write(u.atoms)
(For use with PMDA we cannot currently use a in-memory trajectory
(see `Issue #15`_) so we must write out the RMS-superimposed
trajectory to the file "rmsfit.xtc".)
The trajectory is now fitted to the reference (the RMSD is stored as
`aligner.rmsd` for further inspection). Now we can calculate the RMSF::
from pmda.rms import RMSF
u = mda.Universe(TPR, "rmsfit.xtc")
        calphas = u.select_atoms("protein and name CA")
rmsfer = RMSF(calphas).run()
and plot::
import matplotlib.pyplot as plt
plt.plot(calphas.resnums, rmsfer.rmsf)
.. versionadded:: 0.3.0
.. _`Issue #15`: https://github.com/MDAnalysis/pmda/issues/15
"""
def __init__(self, atomgroup):
u = atomgroup.universe
super(RMSF, self).__init__(u, (atomgroup, ))
self._atomgroup = atomgroup
self._top = u.filename
self._traj = u.trajectory.filename
def _single_frame(self, ts, atomgroups):
# mean and sum of squares calculations done in _reduce()
return atomgroups[0]
def _conclude(self):
"""
self._results : Array
(n_blocks x 2 x N x 3) array
"""
n_blocks = len(self._results)
# serial case
if n_blocks == 1:
# get length of trajectory slice
self.mean = self._results[0, 0]
self.sumsquares = self._results[0, 1]
self.rmsf = np.sqrt(self.sumsquares.sum(axis=1) / self.n_frames)
# parallel case
else:
mean = self._results[:, 0]
sos = self._results[:, 1]
            # create a list of (timesteps, mean, sumsq) tuples for each block
vals = []
for i in range(n_blocks):
vals.append((len(self._blocks[i]), mean[i], sos[i]))
# combine block results using fold method
results = fold_second_order_moments(vals)
self.mean = results[1]
self.sumsquares = results[2]
self.rmsf = np.sqrt(self.sumsquares.sum(axis=1) / self.n_frames)
self._negative_rmsf(self.rmsf)
@staticmethod
def _reduce(res, result_single_frame):
"""
'sum' action for time series
"""
atoms = result_single_frame
positions = atoms.positions.astype(np.float64)
# initial time step case
if isinstance(res, list) and len(res) == 0:
# initial mean position = initial position
mean = positions
# create new zero-array for sum of squares to prevent blocks from
# using data from previous blocks
sumsq = np.zeros((atoms.n_atoms, 3))
# set initial time step for each block to zero
k = 0
# assign initial (sum of squares and mean) zero-arrays to res
res = [mean, sumsq, k]
else:
# update time step
k = res[2] + 1
# update sum of squares
res[1] += (k / (k + 1)) * (positions - res[0]) ** 2
# update mean
res[0] = (k * res[0] + positions) / (k + 1)
# update time step in res
res[2] = k
return res
@staticmethod
def _negative_rmsf(rmsf):
if not (rmsf >= 0).all():
raise ValueError("Some RMSF values negative; overflow " +
"or underflow occurred")
| gpl-2.0 |
nvoron23/scikit-learn | sklearn/preprocessing/label.py | 137 | 27165 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows one to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
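# Illustrative aside (hypothetical helper, not part of scikit-learn's API): as
# the Notes of ``inverse_transform`` above explain, fractional multiclass
# scores (e.g. decision_function output) are inverted by taking the class with
# the greatest value, whereas binary/multilabel indicators are compared
# against a threshold. A minimal sketch of the multiclass rule:
def _argmax_inverse_sketch(scores, classes):
    """Return, for each row of ``scores``, the class with the highest score."""
    return np.asarray(classes)[np.asarray(scores).argmax(axis=1)]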
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
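# Illustrative example (hypothetical scores, dense code path): the row-wise
# argmax is mapped back through `classes`.
#
#     _inverse_binarize_multiclass(np.array([[0.1, 0.8, 0.1],
#                                            [0.6, 0.3, 0.1]]), classes=[2, 5, 7])
#     # -> array([5, 2])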
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
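# Illustrative round trip (hypothetical label sets): inverse_transform maps an
# indicator matrix back to label tuples via classes_.
#
#     mlb = MultiLabelBinarizer().fit([(1, 2), (3,)])
#     mlb.inverse_transform(np.array([[0, 1, 1], [1, 0, 0]]))
#     # -> [(2, 3), (1,)]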
| bsd-3-clause |
voxlol/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
tmthydvnprt/compfipy | compfipy/models.py | 1 | 3612 | """
models.py
Various stochastic models of the "market" that provide "fake" asset prices to test on.
"""
import math
import datetime
import pandas as pd
import numpy as np
from compfipy import market
# Common conversion functions used across all models
# ------------------------------------------------------------------------------------------------------------------------------
def convert_to_returns(log_returns=None):
"""
Convert log returns to normal returns.
"""
return np.exp(log_returns)
def convert_to_price(x0=1, log_returns=None):
"""
Convert log returns to normal returns and calculate value from initial price.
"""
returns = convert_to_returns(log_returns)
prices = pd.concat([pd.Series(x0), returns[:-1]], ignore_index=True)
return prices.cumprod()
# Stochastic Models
# ------------------------------------------------------------------------------------------------------------------------------
def brownian_motion(time=500, delta_t=(1.0 / 252.0), sigma=2):
"""
    Return log returns of an asset whose price evolves according to Brownian motion.
"""
sqrt_delta_t_sigma = math.sqrt(delta_t) * sigma
log_returns = pd.Series(np.random.normal(loc=0, scale=sqrt_delta_t_sigma, size=time))
return log_returns
def geometric_brownian_motion(time=500, delta_t=(1.0 / 252.0), sigma=2, mu=0.5):
"""
    Return log returns of an asset whose price evolves according to geometric Brownian motion.
"""
wiener_process = brownian_motion(time, delta_t, sigma)
sigma_pow_mu_delta_t = (mu - 0.5 * math.pow(sigma, 2)) * delta_t
log_returns = wiener_process + sigma_pow_mu_delta_t
return log_returns
def jump_diffusion(time=500, delta_t=(1.0 / 252.0), mu=0.0, sigma=0.3, jd_lambda=0.1):
"""
    Return a jump diffusion process.
"""
s_n = 0
t = 0
small_lambda = -(1.0 / jd_lambda)
jump_sizes = pd.Series(np.zeros((time,)))
while s_n < time:
s_n += small_lambda * math.log(np.random.uniform(0, 1))
for j in xrange(0, time):
if t * delta_t <= s_n * delta_t <= (j+1) * delta_t:
jump_sizes[j] += np.random.normal(loc=mu, scale=sigma)
break
t += 1
return jump_sizes
def merton_jump_diffusion(time=500, delta_t=(1.0 / 252.0), sigma=2, gbm_mu=0.5, jd_mu=0.0, jd_sigma=0.3, jd_lambda=0.1):
"""
    Return log returns of an asset whose price evolves according to geometric Brownian motion with jump diffusion.
"""
jd = jump_diffusion(time, delta_t, jd_mu, jd_sigma, jd_lambda)
gbm = geometric_brownian_motion(time, delta_t, sigma, gbm_mu)
return gbm + jd
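# Illustrative usage (a sketch; x0 is an arbitrary starting price): log returns
# simulated by any of the models above can be turned into a price series.
#
#     log_returns = geometric_brownian_motion(time=500)
#     prices = convert_to_price(x0=100.0, log_returns=log_returns)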
# Create standard EOD data from price data
# ------------------------------------------------------------------------------------------------------------------------------
def generate_ochlv(prices=None, ochl_mu=0.0, ochl_sigma=0.1, v_mu=100000, v_sigma=math.sqrt(10000)):
"""
Turn asset price into standard EOD data.
"""
date_rng = market.date_range(datetime.date.today(), periods=len(prices))
ochlv = pd.DataFrame({'Close':prices})
ochlv['Open'] = prices + prices * np.random.normal(loc=ochl_mu, scale=ochl_sigma, size=prices.shape)
ochlv['High'] = prices + prices * np.random.normal(loc=ochl_mu, scale=ochl_sigma, size=prices.shape)
ochlv['Low'] = prices + prices * np.random.normal(loc=ochl_mu, scale=ochl_sigma, size=prices.shape)
ochlv['Volume'] = v_mu * np.abs(prices.pct_change(2).shift(-2).ffill()) \
+ np.random.normal(loc=v_mu, scale=v_sigma, size=prices.shape)
ochlv = ochlv.set_index(date_rng)
return ochlv
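# Illustrative usage (default noise parameters assumed): wrap a simulated price
# series, e.g. `prices` from the sketch above, into a standard OCHLV frame.
#
#     ochlv = generate_ochlv(prices=prices)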
| mit |
walterreade/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28856 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import warnings
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import FitFailedWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler)
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
    # Make a dataset with a lot of noise to get various kinds of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
        # Because we set iid to False, the mean_validation_score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
ishank08/scikit-learn | sklearn/__init__.py | 28 | 3073 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.19.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'exceptions', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'learning_curve', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
jaduimstra/nilmtk | nilmtk/dataset_converters/redd/convert_redd.py | 6 | 5462 | from __future__ import print_function, division
import pandas as pd
import numpy as np
from copy import deepcopy
from os.path import join, isdir, isfile
from os import listdir
import re
from sys import stdout
from nilmtk.utils import get_datastore
from nilmtk.datastore import Key
from nilmtk.timeframe import TimeFrame
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.utils import get_module_directory, check_directory_exists
from nilm_metadata import convert_yaml_to_hdf5, save_yaml_to_datastore
"""
TODO:
* The bottleneck appears to be the CPU, so conversion could be sped up by
  using the multiprocessing module to load REDD channels in parallel across
  multiple CPU cores.
"""
def convert_redd(redd_path, output_filename, format='HDF'):
"""
Parameters
----------
redd_path : str
The root path of the REDD low_freq dataset.
output_filename : str
The destination filename (including path and suffix).
format : str
format of output. Either 'HDF' or 'CSV'. Defaults to 'HDF'
"""
def _redd_measurement_mapping_func(house_id, chan_id):
ac_type = 'apparent' if chan_id <= 2 else 'active'
return [('power', ac_type)]
# Open DataStore
store = get_datastore(output_filename, format, mode='w')
# Convert raw data to DataStore
_convert(redd_path, store, _redd_measurement_mapping_func, 'US/Eastern')
# Add metadata
save_yaml_to_datastore(join(get_module_directory(),
'dataset_converters',
'redd',
'metadata'),
store)
store.close()
print("Done converting REDD to HDF5!")
def _convert(input_path, store, measurement_mapping_func, tz, sort_index=True):
"""
Parameters
----------
input_path : str
The root path of the REDD low_freq dataset.
store : DataStore
The NILMTK DataStore object.
measurement_mapping_func : function
Must take these parameters:
- house_id
- chan_id
Function should return a list of tuples e.g. [('power', 'active')]
tz : str
Timezone e.g. 'US/Eastern'
sort_index : bool
"""
check_directory_exists(input_path)
# Iterate though all houses and channels
houses = _find_all_houses(input_path)
for house_id in houses:
print("Loading house", house_id, end="... ")
stdout.flush()
chans = _find_all_chans(input_path, house_id)
for chan_id in chans:
print(chan_id, end=" ")
stdout.flush()
key = Key(building=house_id, meter=chan_id)
measurements = measurement_mapping_func(house_id, chan_id)
csv_filename = _get_csv_filename(input_path, key)
df = _load_csv(csv_filename, measurements, tz)
if sort_index:
df = df.sort_index() # raw REDD data isn't always sorted
store.put(str(key), df)
print()
def _find_all_houses(input_path):
"""
Returns
-------
list of integers (house instances)
"""
dir_names = [p for p in listdir(input_path) if isdir(join(input_path, p))]
return _matching_ints(dir_names, '^house_(\d)$')
def _find_all_chans(input_path, house_id):
"""
Returns
-------
list of integers (channels)
"""
house_path = join(input_path, 'house_{:d}'.format(house_id))
filenames = [p for p in listdir(house_path) if isfile(join(house_path, p))]
return _matching_ints(filenames, '^channel_(\d\d?).dat$')
def _matching_ints(strings, regex):
"""Uses regular expression to select and then extract an integer from
strings.
Parameters
----------
strings : list of strings
regex : string
Regular Expression. Including one group. This group is used to
extract the integer from each string.
Returns
-------
list of ints
"""
ints = []
p = re.compile(regex)
for string in strings:
m = p.match(string)
if m:
integer = int(m.group(1))
ints.append(integer)
ints.sort()
return ints
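# Illustrative example (names are hypothetical):
#
#     _matching_ints(['house_1', 'house_3', 'README'], '^house_(\d)$')
#     # -> [1, 3]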
def _get_csv_filename(input_path, key_obj):
"""
Parameters
----------
input_path : (str) the root path of the REDD low_freq dataset
key_obj : (nilmtk.Key) the house and channel to load
Returns
-------
filename : str
"""
assert isinstance(input_path, str)
assert isinstance(key_obj, Key)
# Get path
house_path = 'house_{:d}'.format(key_obj.building)
path = join(input_path, house_path)
assert isdir(path)
# Get filename
filename = 'channel_{:d}.dat'.format(key_obj.meter)
filename = join(path, filename)
assert isfile(filename)
return filename
def _load_csv(filename, columns, tz):
"""
Parameters
----------
filename : str
columns : list of tuples (for hierarchical column index)
tz : str e.g. 'US/Eastern'
Returns
-------
dataframe
"""
# Load data
df = pd.read_csv(filename, sep=' ', names=columns,
dtype={m:np.float32 for m in columns})
# Modify the column labels to reflect the power measurements recorded.
df.columns.set_names(LEVEL_NAMES, inplace=True)
# Convert the integer index column to timezone-aware datetime
df.index = pd.to_datetime(df.index.values, unit='s', utc=True)
df = df.tz_convert(tz)
return df
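# Note (assumed input layout): REDD channel_*.dat files are space-separated
# "<unix_timestamp> <reading>" pairs; the timestamp column becomes the index
# that _load_csv converts to timezone-aware datetimes above.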
| apache-2.0 |
trankmichael/scikit-learn | sklearn/datasets/svmlight_format.py | 114 | 15826 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
    In case the file contains pairwise preference constraints (known
    as "qid" in the svmlight format), these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
y: ndarray of shape (n_samples,), or, in the multilabel a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
    In case the file contains pairwise preference constraints (known
    as "qid" in the svmlight format), these are ignored unless the
    query_id parameter is set to True. These pairwise preference
    constraints can be used to constrain the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
        Whether column indices in the files are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
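    Examples
    --------
    A minimal usage sketch; the two file names below are only placeholders
    for real SVMlight-formatted files on disk::
        from sklearn.datasets import load_svmlight_files
        X_train, y_train, X_test, y_test = load_svmlight_files(
            ("train.svm", "test.svm"))
        # Both matrices are forced to the same number of columns.
        assert X_train.shape[1] == X_test.shape[1]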
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
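    Examples
    --------
    A minimal sketch dumping a small dense array; the output path is purely
    illustrative::
        import numpy as np
        from sklearn.datasets import dump_svmlight_file
        X = np.array([[0., 1.], [2., 0.]])
        y = np.array([0, 1])
        dump_svmlight_file(X, y, "/tmp/tiny.svm", zero_based=True)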
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
| bsd-3-clause |
scattering/ipeek | server/plot_dcs.py | 1 | 3478 | # -*- coding: utf-8 -*-
import h5py
import simplejson
import os
import numpy as np
#import matplotlib.pyplot as plt
from time import time
def Elam(lam):
"""
convert wavelength in angstroms to energy in meV
"""
return 81.81/lam**2
def Ek(k):
"""
    convert wave-vector in inverse angstroms to energy in meV
"""
return 2.072*k**2
def kE(E):
return np.sqrt(E/2.072)
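# Rough sanity checks for the conversions above (values rounded): Elam(4.0) is
# 81.81 / 16, about 5.11 meV, and kE(Ek(k)) gives back k since kE() inverts Ek().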
def Qfunc(ki, kf, theta):
"""
evaluate the magnitude of Q from ki, kf, and theta
theta is the angle between kf and ki, sometimes called 2 theta, units of degrees
"""
return np.sqrt( ki**2 + kf**2 - 2*ki*kf*np.cos(theta*np.pi/180) )
def Ef_from_timechannel(timeChannel, t_SD_min, speedRatDenom, masterSpeed):
"""
using the parameters
t_SD_min = minimum sample to detector time
speedRatDenom = to set FOL chopper speed
masterSpeed = chopper speed (except for FOL chopper)
    using the variable
timeChannel, where I am numbering from 1 <be careful of this convention>
"""
return 8.41e7 / (t_SD_min + (timeChannel+1)* (6e4 *(speedRatDenom/masterSpeed)) )**2
def process_raw_dcs(data_path):
# t0 = time()
os.chdir(data_path) # change working directory
detInfo = np.genfromtxt('dcs_detector_info.txt', skip_header=1, skip_footer=17)
detToTwoTheta = detInfo[:,9] # 10th column
os.system('gzip -dc livedata.dcs.gz > livedata.dcs')
#os.system('C:\\Software\\Octave-3.6.4\\bin\\octave --eval "load livedata.dcs; save -hdf5 livedata.hdf;"')
os.system('octave --eval "load livedata.dcs; save -hdf5 livedata.hdf;"')
f = h5py.File('livedata.hdf')
data = f['histodata']['value'].value
ch_wl = f['ch_wl']['value'].value
Ei = Elam(ch_wl)
ki = kE(Ei)
dE =0.5*(-0.10395+0.05616 *Ei+0.00108 *Ei**2) #take the putative resolution and halve it
masterSpeed = f['ch_ms']['value'].value
speedRatDenom = f['ch_srdenom']['value'].value
t_SD_min = f['tsdmin']['value'].value
Q_max = Qfunc(ki,ki,150)
Q_min = 0
E_bins = np.linspace(-Ei, Ei, int(2*Ei/dE) )
Q_bins = np.linspace(Q_min,Q_max,301)
#for every point in {timechannel, detectorchannel} space, map into a bin of {E,Q} space
#remember, data is organized as data[detectorchannel][timechannel]
i,j = np.indices(data.shape)
ef = Ef_from_timechannel(j, t_SD_min, speedRatDenom, masterSpeed)
Q_ = Qfunc(ki, kE(ef), detToTwoTheta[:, None])
E_transfer = Ei-ef
E_mask = (E_transfer > -Ei)
EQ_data, xedges, yedges = np.histogram2d(Q_[E_mask], E_transfer[E_mask], bins=(Q_bins, E_bins), range=([Q_min,Q_max], [-Ei, Ei]), weights=data[E_mask])
stop_date = ''.join(chr(a) for a in f['stop_date']['value'].value.flatten())
start_date = ''.join(chr(a) for a in f['start_date']['value'].value.flatten())
output = {
"title": "DCS snapshot",
"dims": {
"ymin": -Ei,
"ymax": Ei,
"ydim": EQ_data.shape[1],
"xmin": 0,
"xmax": Q_max,
"xdim": EQ_data.shape[0],
"zmin": EQ_data.min(),
"zmax": EQ_data.max()
},
"type": "2d",
"ylabel": "Ei-Ef [meV]",
"xlabel": "|Q| [Å⁻¹]",
"z": [EQ_data.T.tolist()],
"options": {},
"metadata": {
"stop_date": stop_date,
"start_date": start_date
}
}
#print time()-t0
return simplejson.dumps([output])
| unlicense |
ebernhardson/l2r | code/bench_formats.py | 1 | 1186 | import pandas as pd
import feather
import os
import timeit
import config
from utils import table_utils
df = table_utils._read(config.ALL_DATA)
FILE_HDF = os.path.join(config.TMP_DIR, 'test.h5')
FILE_PICKLE = os.path.join(config.TMP_DIR, 'test.pkl')
FILE_FEATHER = os.path.join(config.TMP_DIR, 'test.feather')
def test_hdf_write():
df.to_hdf(FILE_HDF, 'test', mode='w')
def test_hdf_read():
pd.read_hdf(FILE_HDF, 'test')
def test_pickle_write():
df.to_pickle(FILE_PICKLE)
def test_pickle_read():
pd.read_pickle(FILE_PICKLE)
def test_feather_write():
feather.write_dataframe(df.copy(), FILE_FEATHER)
def test_feather_read():
feather.read_dataframe(FILE_FEATHER)
def test(func):
    took = timeit.timeit("%s()" % (func.__name__), setup="from __main__ import %s" % (func.__name__), number=3)
    # Return the formatted line so the summary at the end can join the results.
    result = "%s: %.3f" % (func.__name__, took)
    print result
    return result
if __name__ == "__main__":
res = []
res.append(test(test_hdf_write))
res.append(test(test_hdf_read))
res.append(test(test_pickle_write))
res.append(test(test_pickle_read))
res.append(test(test_feather_write))
res.append(test(test_feather_read))
print "\n\n\n"
print "\n".join(res)
| mit |
msultan/msmbuilder | setup.py | 3 | 7479 | """MSMBuilder: Statistical models for Biomolecular Dynamics
"""
from __future__ import print_function, absolute_import
DOCLINES = __doc__.split("\n")
import sys
import traceback
import numpy as np
from os.path import join as pjoin
from setuptools import setup, Extension, find_packages
try:
sys.dont_write_bytecode = True
sys.path.insert(0, '.')
from basesetup import write_version_py, CompilerDetection, \
check_dependencies
finally:
sys.dont_write_bytecode = False
try:
import mdtraj
mdtraj_capi = mdtraj.capi()
except (ImportError, AttributeError):
print('=' * 80)
print('MDTraj version 1.1.X or later is required')
print('=' * 80)
traceback.print_exc()
sys.exit(1)
if '--debug' in sys.argv:
sys.argv.remove('--debug')
DEBUG = True
else:
DEBUG = False
if '--disable-openmp' in sys.argv:
sys.argv.remove('--disable-openmp')
DISABLE_OPENMP = True
else:
DISABLE_OPENMP = False
try:
import Cython
from Cython.Distutils import build_ext
if Cython.__version__ < '0.18':
raise ImportError()
except ImportError:
print(
'Cython version 0.18 or later is required. Try "conda install cython"')
sys.exit(1)
# #########################
VERSION = '3.9.0.dev0'
ISRELEASED = False
__version__ = VERSION
# #########################
CLASSIFIERS = """\
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)
Programming Language :: C++
Programming Language :: Python
Development Status :: 5 - Production/Stable
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
"""
if any(cmd in sys.argv for cmd in ('install', 'build', 'develop')):
check_dependencies((
('numpy',),
('scipy',),
('pandas',),
('six',),
('mdtraj',),
('sklearn', 'scikit-learn'),
('numpydoc',),
('tables', 'pytables'),
))
# Where to find extensions
MSMDIR = 'msmbuilder/msm/'
HMMDIR = 'msmbuilder/hmm/'
CLUSTERDIR = 'msmbuilder/cluster/'
compiler = CompilerDetection(DISABLE_OPENMP)
with open('msmbuilder/src/config.pxi', 'w') as f:
f.write('''
DEF DEBUG = {debug}
DEF OPENMP = {openmp}
'''.format(openmp=compiler.openmp_enabled, debug=DEBUG))
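# Note: config.pxi written above is a Cython include file; any .pyx source that
# does `include "config.pxi"` (presumably the Cython modules in this package)
# sees DEBUG and OPENMP as compile-time constants.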
extensions = []
extensions.append(
Extension('msmbuilder.example_datasets._muller',
sources=[pjoin('msmbuilder', 'example_datasets', '_muller.pyx')],
include_dirs=[np.get_include()]))
extensions.append(
Extension('msmbuilder.msm._markovstatemodel',
sources=[pjoin(MSMDIR, '_markovstatemodel.pyx'),
pjoin(MSMDIR, 'src/transmat_mle_prinz.c')],
include_dirs=[pjoin(MSMDIR, 'src'), np.get_include()]))
extensions.append(
Extension('msmbuilder.tests.test_cyblas',
sources=['msmbuilder/tests/test_cyblas.pyx'],
include_dirs=['msmbuilder/src', np.get_include()]))
extensions.append(
Extension('msmbuilder.msm._ratematrix',
sources=[pjoin(MSMDIR, '_ratematrix.pyx')],
language='c++',
extra_compile_args=compiler.compiler_args_openmp,
libraries=compiler.compiler_libraries_openmp,
include_dirs=['msmbuilder/src', np.get_include()]))
extensions.append(
Extension('msmbuilder.decomposition._speigh',
sources=[pjoin('msmbuilder', 'decomposition', '_speigh.pyx')],
language='c++',
extra_compile_args=compiler.compiler_args_openmp,
libraries=compiler.compiler_libraries_openmp,
include_dirs=['msmbuilder/src', np.get_include()]))
extensions.append(
Extension('msmbuilder.msm._metzner_mcmc_fast',
sources=[pjoin(MSMDIR, '_metzner_mcmc_fast.pyx'),
pjoin(MSMDIR, 'src/metzner_mcmc.c')],
libraries=compiler.compiler_libraries_openmp,
extra_compile_args=compiler.compiler_args_openmp,
include_dirs=[pjoin(MSMDIR, 'src'), np.get_include()]))
extensions.append(
Extension('msmbuilder.libdistance',
language='c++',
sources=['msmbuilder/libdistance/libdistance.pyx'],
# msvc needs to be told "libtheobald", gcc wants just "theobald"
libraries=['%stheobald' % ('lib' if compiler.msvc else '')],
include_dirs=["msmbuilder/libdistance/src",
mdtraj_capi['include_dir'], np.get_include()],
library_dirs=[mdtraj_capi['lib_dir']],
))
extensions.append(
Extension('msmbuilder.cluster._kmedoids',
language='c++',
sources=[pjoin(CLUSTERDIR, '_kmedoids.pyx'),
pjoin(CLUSTERDIR, 'src', 'kmedoids.cc')],
include_dirs=[np.get_include()]))
# To get debug symbols on Windows, use
# extra_link_args=['/DEBUG']
# extra_compile_args=['/Zi']
extensions.append(
Extension('msmbuilder.hmm.gaussian',
language='c++',
sources=[pjoin(HMMDIR, 'gaussian.pyx'),
pjoin(HMMDIR, 'src/GaussianHMMFitter.cpp')],
libraries=compiler.compiler_libraries_openmp,
extra_compile_args=compiler.compiler_args_sse3
+ compiler.compiler_args_openmp,
include_dirs=[np.get_include(),
HMMDIR,
pjoin(HMMDIR, 'src/include/'),
pjoin(HMMDIR, 'src/')]))
extensions.append(
Extension('msmbuilder.hmm.vonmises',
language='c++',
sources=[pjoin(HMMDIR, 'vonmises.pyx'),
pjoin(HMMDIR, 'src/VonMisesHMMFitter.cpp'),
pjoin(HMMDIR, 'cephes/i0.c'),
pjoin(HMMDIR, 'cephes/chbevl.c')],
libraries=compiler.compiler_libraries_openmp,
extra_compile_args=compiler.compiler_args_sse3
+ compiler.compiler_args_openmp,
include_dirs=[np.get_include(),
HMMDIR,
pjoin(HMMDIR, 'src/include/'),
pjoin(HMMDIR, 'src/'),
pjoin(HMMDIR, 'cephes/')]))
write_version_py(VERSION, ISRELEASED, filename='msmbuilder/version.py')
setup(name='msmbuilder',
author='Robert McGibbon',
author_email='[email protected]',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
version=__version__,
url='https://github.com/msmbuilder/msmbuilder',
platforms=['Linux', 'Mac OS-X', 'Unix'],
classifiers=CLASSIFIERS.splitlines(),
packages=find_packages(),
package_data={
'msmbuilder.tests': ['workflows/*'],
'msmbuilder': ['project_templates/*.*',
'project_templates/*/*',
'io_templates/*',
],
},
entry_points={'console_scripts':
['msmb = msmbuilder.scripts.msmb:main']},
zip_safe=False,
ext_modules=extensions,
cmdclass={'build_ext': build_ext})
| lgpl-2.1 |
casselineau/Tracer | tracer/surface.py | 1 | 7040 | # Define some basic surfaces for use with the ray tracer. From this minimal
# hierarchy other surfaces should be derived to implement actual geometric
# operations.
#
# References:
# [1] John J. Craig, Introduction to Robotics, 3rd ed., 2005.
import numpy as N
from has_frame import HasFrame
class Surface(HasFrame):
"""
Defines the base of surfaces that interact with rays.
"""
def __init__(self, geometry, optics, location=None, rotation=None, fixed_color=False):
"""
Arguments:
geometry - a GeometryManager object responsible for finding ray
intersections with the surface.
        optics - a callable that gets the geometry manager, bundle and
selector, and returns the outgoing ray bundle generated by the
geometry and bundle.
location, rotation - passed directly to the HasFrame constructor.
fixed_color - For rendering purposed. If a tuple of normalised RGB or RGBa is given,
the geometry will be of that color in the rendering.
"""
HasFrame.__init__(self, location, rotation)
self._geom = geometry
self._opt = optics
self._fixed_color = fixed_color
if fixed_color:
self._fixed_color = fixed_color[:3]
if len(fixed_color) == 4:
self._transparency = fixed_color[-1]
else:
self._transparency = 0
def get_optics_manager(self):
"""
Returns the optics-manager callable. May be useful for introspection.
Note that it is a read-only attribute.
"""
return self._opt
def get_geometry_manager(self):
"""
Returns the geometry-manager instance. May be useful for introspection.
Note that it is a read-only attribute.
"""
return self._geom
def register_incoming(self, ray_bundle):
"""
Records the incoming ray bundle, and uses the geometry manager to
return the parametric positions of intersection with the surface along
the ray.
Arguments:
ray_bundle - a RayBundle object with at-least its vertices and
directions specified.
Returns
A 1D array with the parametric position of intersection along each of
the rays. Rays that missed the surface return +infinity.
"""
self._current_bundle = ray_bundle
return self._geom.find_intersections(self._temp_frame, ray_bundle)
def select_rays(self, idxs):
"""
Informs the geometry manager that only the specified rays are to be
used henceforth.
Arguments:
idxs - an array with indices into the last registered ray bundle,
marking rays that will be used.
"""
self._selected = idxs
self._geom.select_rays(idxs)
def get_outgoing(self):
"""
Generates a new ray bundle, which is the reflections/refractions of the
user-selected rays out of the incoming ray-bundle that was previously
registered.
Returns:
a RayBundle object with the new bundle, with vertices on the surface
and directions according to optics laws.
"""
return self._opt(self._geom, self._current_bundle, self._selected)
def done(self):
"""
When this is called, the surface will no longer be queried on the
results of the latest trace iteration, so it can discard internal
data to relieve memory pressure.
"""
if hasattr(self, '_current_bundle'):
del self._current_bundle
self._geom.done()
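    # A hedged sketch of how a tracing engine is assumed to drive these hooks
    # on each iteration (the engine-side names are illustrative only):
    #   prm = surface.register_incoming(bundle)        # +inf marks missed rays
    #   surface.select_rays(N.nonzero(N.isfinite(prm))[0])
    #   outgoing = surface.get_outgoing()
    #   surface.done()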
def global_to_local(self, points):
"""
Transform a set of points in the global coordinates back into the frame
used during tracing.
Arguments:
points - a 3 x n array for n 3D points
returns:
local - a 3 x n array with the respective points in local coordinates.
"""
proj = N.round(N.linalg.inv(self._temp_frame), decimals=9)
return N.dot(proj, N.vstack((points, N.ones(points.shape[1]))))
def mesh(self, resolution):
"""
Represent the surface as a mesh in global coordinates.
Arguments:
resolution - in points per unit length (so the number of points
returned is O(A*resolution**2) for area A)
Returns:
x, y, z - each a 2D array holding in its (i,j) cell the x, y, and z
coordinate (respectively) of point (i,j) in the mesh.
"""
# The geometry manager has the local-coordinates mesh.
x, y, z = self._geom.mesh(resolution)
local = N.array((x, y, z, N.ones_like(x)))
glob = N.tensordot(self._temp_frame, local, axes=([1], [0]))
return glob[:3]
def get_scene_graph(self, resolution, fluxmap, trans, vmin, vmax):
"""
        Any object that provides a nice QuadMesh from the previous code should be able to render in Coin3D with the following...
"""
from pivy import coin
import matplotlib.cm as cm
from matplotlib import colors
n0 = self.get_scene_graph_transform()
o = self.get_optics_manager()
if self._fixed_color:
mat = coin.SoMaterial()
mat.diffuseColor = self._fixed_color
mat.specularColor = self._fixed_color
mat.transparency = (self._transparency)
n0.addChild(mat)
fluxmap = False
else:
if o.__class__.__name__[-10:] == 'Reflective':
mat = coin.SoMaterial()
mat.diffuseColor = (.5,.5,.5)
mat.specularColor = (.6,.6,.6)
mat.shininess = o._abs
n0.addChild(mat)
fluxmap = False
elif o.__class__.__name__ == 'PeriodicBoundary':
mat = coin.SoMaterial()
mat.ambientColor = (.0,.5,.5)
mat.transparency = (0.8)
n0.addChild(mat)
fluxmap = False
elif fluxmap != None:
if hasattr(o,'get_all_hits'):
hitdata = o.get_all_hits()
xyz = self.global_to_local(hitdata[1])[:3]
# plot the histogram into the scenegraph
g = self.get_geometry_manager()
if hasattr(g, 'get_fluxmap'):
flux = g.get_fluxmap(hitdata[0], xyz, resolution)
if not(hasattr(flux[0],'__len__')):
flux = [flux]
else:
fluxmap = False
else:
mat = coin.SoMaterial()
mat.diffuseColor = (0.2,0.2,0.2)
mat.specularColor = (0.2,0.2,0.2)
n0.addChild(mat)
fluxmap = False
meshes = self._geom.get_scene_graph(resolution)
for m in xrange(len(meshes)/3):
n = coin.SoSeparator()
X,Y,Z = meshes[3*m:3*m+3]
nr,nc = X.shape
A = [(X.flat[i],Y.flat[i],Z.flat[i]) for i in range(len(X.flat))]
coor = coin.SoCoordinate3()
coor.point.setValues(0, len(A), A)
n.addChild(coor)
qm = coin.SoQuadMesh()
qm.verticesPerRow = nc
qm.verticesPerColumn = nr
n.addChild(qm)
sh = coin.SoShapeHints()
sh.shapeType = coin.SoShapeHintsElement.UNKNOWN_SHAPE_TYPE
sh.vertexOrdering = coin.SoShapeHintsElement.COUNTERCLOCKWISE
sh.faceType = coin.SoShapeHintsElement.UNKNOWN_FACE_TYPE
n.addChild(sh)
if fluxmap:
                # It works using n0 instead of n here but I have absolutely no clue why.
norm = colors.Normalize(vmin=vmin, vmax=vmax)
M = cm.ScalarMappable(norm=norm, cmap=cm.viridis)
colormap = M.to_rgba(flux[m])
mat = coin.SoMaterial()
mat.ambientColor = (1,1,1)
mat.diffuseColor.setValues(0, colormap.shape[0], colormap)
if trans==True:
mat.transparency.setValues(0,colormap.shape[0], 1.-flux[m]/N.amax(flux[m]))
n0.addChild(mat)
mymatbind = coin.SoMaterialBinding()
mymatbind.value = coin.SoMaterialBinding.PER_FACE
n0.addChild(mymatbind)
n0.addChild(n)
return n0
| gpl-3.0 |
mo-g/iris | lib/iris/tests/test_mapping.py | 11 | 8113 | # (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Tests map creation.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
import numpy.testing as np_testing
import cartopy.crs as ccrs
import iris
import iris.coord_systems
import iris.cube
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
# A specific cartopy Globe matching the iris RotatedGeogCS default.
_DEFAULT_GLOBE = ccrs.Globe(semimajor_axis=6371229.0,
semiminor_axis=6371229.0,
ellipse=None)
@tests.skip_plot
class TestBasic(tests.GraphicsTest):
cube = iris.tests.stock.realistic_4d()
def test_contourf(self):
cube = self.cube[0, 0]
iplt.contourf(cube)
self.check_graphic()
def test_pcolor(self):
cube = self.cube[0, 0]
iplt.pcolor(cube)
self.check_graphic()
def test_unmappable(self):
cube = self.cube[0, 0]
cube.coord('grid_longitude').standard_name = None
iplt.contourf(cube)
self.check_graphic()
def test_default_projection_and_extent(self):
self.assertEqual(iplt.default_projection(self.cube),
ccrs.RotatedPole(357.5 - 180, 37.5,
globe=_DEFAULT_GLOBE))
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(self.cube),
(3.59579163e+02, 3.59669159e+02, -1.28250003e-01, -3.82499993e-02),
decimal=3)
@tests.skip_data
@tests.skip_plot
class TestUnmappable(tests.GraphicsTest):
def setUp(self):
src_cube = iris.tests.stock.global_pp()
# Make a cube that can't be located on the globe.
cube = iris.cube.Cube(src_cube.data)
cube.add_dim_coord(
iris.coords.DimCoord(np.arange(96, dtype=np.float32) * 100,
long_name='x', units='m'),
1)
cube.add_dim_coord(
iris.coords.DimCoord(np.arange(73, dtype=np.float32) * 100,
long_name='y', units='m'),
0)
cube.standard_name = 'air_temperature'
cube.units = 'K'
cube.assert_valid()
self.cube = cube
def test_simple(self):
iplt.contourf(self.cube)
self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestMappingSubRegion(tests.GraphicsTest):
def setUp(self):
cube_path = tests.get_data_path(
('PP', 'aPProt1', 'rotatedMHtimecube.pp'))
cube = iris.load_cube(cube_path)[0]
# make the data smaller to speed things up.
self.cube = cube[::10, ::10]
def test_simple(self):
# First sub-plot
plt.subplot(221)
plt.title('Default')
iplt.contourf(self.cube)
plt.gca().coastlines()
# Second sub-plot
plt.subplot(222, projection=ccrs.Mollweide(central_longitude=120))
        plt.title('Mollweide')
iplt.contourf(self.cube)
plt.gca().coastlines()
# Third sub-plot (the projection part is redundant, but a useful
# test none-the-less)
ax = plt.subplot(223, projection=iplt.default_projection(self.cube))
plt.title('Native')
iplt.contour(self.cube)
ax.coastlines()
# Fourth sub-plot
ax = plt.subplot(2, 2, 4, projection=ccrs.PlateCarree())
plt.title('PlateCarree')
iplt.contourf(self.cube)
ax.coastlines()
self.check_graphic()
def test_default_projection_and_extent(self):
self.assertEqual(iplt.default_projection(self.cube),
ccrs.RotatedPole(357.5 - 180, 37.5,
globe=_DEFAULT_GLOBE))
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(self.cube),
(313.01998901, 391.11999512, -22.48999977, 24.80999947))
@tests.skip_data
@tests.skip_plot
class TestLowLevel(tests.GraphicsTest):
def setUp(self):
self.cube = iris.tests.stock.global_pp()
self.few = 4
self.few_levels = list(range(280, 300, 5))
self.many_levels = np.linspace(
self.cube.data.min(), self.cube.data.max(), 40)
def test_simple(self):
iplt.contour(self.cube)
self.check_graphic()
def test_params(self):
iplt.contourf(self.cube, self.few)
self.check_graphic()
iplt.contourf(self.cube, self.few_levels)
self.check_graphic()
iplt.contourf(self.cube, self.many_levels)
self.check_graphic()
def test_keywords(self):
iplt.contourf(self.cube, levels=self.few_levels)
self.check_graphic()
iplt.contourf(self.cube, levels=self.many_levels, alpha=0.5)
self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestBoundedCube(tests.GraphicsTest):
def setUp(self):
self.cube = iris.tests.stock.global_pp()
# Add some bounds to this data (this will actually make the bounds
# invalid as they will straddle the north pole and overlap on the
# dateline, but that doesn't matter for this test.)
self.cube.coord('latitude').guess_bounds()
self.cube.coord('longitude').guess_bounds()
def test_pcolormesh(self):
# pcolormesh can only be drawn in native coordinates (or more
# specifically, in coordinates that don't wrap).
plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
iplt.pcolormesh(self.cube)
self.check_graphic()
def test_grid(self):
iplt.outline(self.cube)
self.check_graphic()
def test_default_projection_and_extent(self):
self.assertEqual(iplt.default_projection(self.cube),
ccrs.PlateCarree())
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(self.cube),
[0., 360., -89.99995422, 89.99998474])
np_testing.assert_array_almost_equal(
iplt.default_projection_extent(
self.cube, mode=iris.coords.BOUND_MODE),
[-1.875046, 358.124954, -91.24995422, 91.24998474])
@tests.skip_data
@tests.skip_plot
class TestLimitedAreaCube(tests.GraphicsTest):
def setUp(self):
cube_path = tests.get_data_path(('PP', 'aPProt1', 'rotated.pp'))
self.cube = iris.load_cube(cube_path)[::20, ::20]
self.cube.coord('grid_latitude').guess_bounds()
self.cube.coord('grid_longitude').guess_bounds()
def test_pcolormesh(self):
iplt.pcolormesh(self.cube)
self.check_graphic()
def test_grid(self):
iplt.pcolormesh(self.cube, facecolors='none', edgecolors='blue')
# the result is a graphic which has coloured edges. This is a mpl bug,
# see https://github.com/matplotlib/matplotlib/issues/1302
self.check_graphic()
def test_outline(self):
iplt.outline(self.cube)
self.check_graphic()
def test_scatter(self):
iplt.points(self.cube)
plt.gca().coastlines()
self.check_graphic()
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
ChanderG/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
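# Note: inertia_ is KMeans' own within-cluster sum of squared distances; the
# homo/compl/v-meas/ARI/AMI/silhouette columns come from sklearn.metrics above.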
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
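# Rough size note: the grid above has about ((x_max - x_min) / h) by
# ((y_max - y_min) / h) points, so halving h roughly quadruples the number of
# predictions made below.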
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
cllamb0/dosenet-raspberrypi | graph.py | 2 | 7840 | # Python file that graphs air quality test result CSV files
import matplotlib.pyplot as plt
import matplotlib.figure as fig
import matplotlib.pylab as pyl
import matplotlib
import csv
import dateutil
import argparse
import numpy
import time
import datetime
from matplotlib.dates import DateFormatter
#Given a certain argument, combine results for each size molecule
parser = argparse.ArgumentParser()
parser.add_argument("combinenumber", type = int, help = "Enter a natural number value that will determine the amount of results added together before being graphed")
info = parser.parse_args()
combine_number = info.combinenumber
user_file = input("What air quality test result file do you want to graph? (Put quotation marks around the file name.) File Name: ")
results = csv.reader(open(user_file), delimiter=',')
times = []
P3 = []
P5 = []
P10 = []
P25 = []
P50 = []
P100 = []
Val10 = []
Val25 = []
Val100 = []
row_counter= 0
for r in results:
row_counter += 1
if row_counter>1:
#Append each column in CSV to a separate list
times.append(dateutil.parser.parse(r[0])) #converts str date and time to datetime
P3.append(r[1])
P5.append(r[2])
P10.append(r[3])
P25.append(r[4])
P50.append(r[5])
P100.append(r[6])
Val10.append(r[7])
Val25.append(r[8])
Val100.append(r[9])
#Make sure the argument was valid
while len(times)< combine_number or combine_number<1:
if len(times) == 1:
print("The number provided was too large or not a natural number. There is only 1 result. All data points will be graphed.")
combine_number = 1
elif len(times) == 0:
print("There are no results in the document provided.")
quit()
else:
combine_number = input("The number provided was too large or not a natural number. There are "+str(len(times))+" results. Choose a natural number between 1 and "+str(len(times))+" that will determine the amount of results added together before being graphed. Number: ")
#convert str into int
for i in range(len(P3)):
P3[i] = int(P3[i])
for i in range(len(P5)):
P5[i] = int(P5[i])
for i in range(len(P10)):
P10[i] = int(P10[i])
for i in range(len(P25)):
P25[i] = int(P25[i])
for i in range(len(P50)):
P50[i] = int(P50[i])
for i in range(len(P100)):
P100[i] = int(P100[i])
for i in range(len(Val10)):
Val10[i] = int(Val10[i])
for i in range(len(Val25)):
Val25[i] = int(Val25[i])
for i in range(len(Val100)):
Val100[i] = int(Val100[i])
new_P3 = []
new_P5 = []
new_P10 = []
new_P25 = []
new_P50 = []
new_P100 = []
new_Val10 = []
new_Val25 = []
new_Val100 = []
#Get rid of unnecessary data
remainder_P3 = (len(P3)%(combine_number))
if remainder_P3 !=0:
for i in range(int(remainder_P3)):
P3.pop()
remainder_P5 = (len(P5)%(combine_number))
if remainder_P5 !=0:
for i in range(int(remainder_P5)):
P5.pop()
remainder_P10 = (len(P10)%(combine_number))
if remainder_P10 !=0:
for i in range(int(remainder_P10)):
P10.pop()
remainder_P25 = (len(P25)%(combine_number))
if remainder_P25 !=0:
for i in range(int(remainder_P25)):
P25.pop()
remainder_P50 = (len(P50)%(combine_number))
if remainder_P50 !=0:
for i in range(int(remainder_P50)):
P50.pop()
remainder_P100 = (len(P100)%(combine_number))
if remainder_P100 !=0:
for i in range(int(remainder_P100)):
P100.pop()
remainder_Val10 = (len(Val10)%(combine_number))
if remainder_Val10 !=0:
for i in range(int(remainder_Val10)):
Val10.pop()
remainder_Val25 = (len(Val25)%(combine_number))
if remainder_Val25 !=0:
for i in range(int(remainder_Val25)):
Val25.pop()
remainder_Val100 = (len(Val100)%(combine_number))
if remainder_Val100 !=0:
for i in range(int(remainder_Val100)):
Val100.pop()
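#Worked example of the trimming above (numbers are illustrative): with 10
#results and combine_number = 3 the remainder is 10 % 3 = 1, so one result is
#dropped and three groups of 3 results each are summed below.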
#Add up data
for i in range(int(len(P3)/combine_number)):
numberA = int(i*combine_number)
numberB = int((i*combine_number) + combine_number)
sum_P3 = [sum(P3[numberA:numberB])]
new_P3.append(sum_P3)
for i in range(int(len(P5)/combine_number)):
numberC = int(i*combine_number)
numberD = int((i*combine_number) + combine_number)
sum_P5 = [sum(P5[numberC:numberD])]
new_P5.append(sum_P5)
for i in range(int(len(P10)/combine_number)):
numberE = int(i*combine_number)
numberF = int((i*combine_number) + combine_number)
sum_P10 = [sum(P10[numberE:numberF])]
new_P10.append(sum_P10)
for i in range(int(len(P25)/combine_number)):
numberG = int(i*combine_number)
numberH = int((i*combine_number) + combine_number)
sum_P25 = [sum(P25[numberG:numberH])]
new_P25.append(sum_P25)
for i in range(int(len(P50)/combine_number)):
numberJ = int(i*combine_number)
numberK = int((i*combine_number) + combine_number)
sum_P50 = [sum(P50[numberJ:numberK])]
new_P50.append(sum_P50)
for i in range(int(len(P100)/combine_number)):
    numberL = int(i*combine_number)
numberM = int((i*combine_number) + combine_number)
sum_P100 = [sum(P100[numberL:numberM])]
new_P100.append(sum_P100)
for i in range(int(len(Val10)/combine_number)):
numberQ = int(i*combine_number)
numberR = int((i*combine_number) + combine_number)
sum_Val10 = [sum(Val10[numberQ:numberR])]
new_Val10.append(sum_Val10)
for i in range(int(len(Val25)/combine_number)):
numberS = int(i*combine_number)
numberT = int((i*combine_number) + combine_number)
sum_Val25 = [sum(Val25[numberS:numberT])]
new_Val25.append(sum_Val25)
for i in range(int(len(Val100)/combine_number)):
numberV = int(i*combine_number)
numberW = int((i*combine_number) + combine_number)
sum_Val100 = [sum(Val100[numberV:numberW])]
new_Val100.append(sum_Val100)
#Get rid of last time if unecessary
if remainder_P25 !=0:
for i in range(int(remainder_P25)):
times.pop()
#State how many results have been excluded
if int(remainder_P25) != 1:
print(str(int(remainder_P25))+" results have been excluded from the graph.")
else:
print("1 result has been excluded from the graph.")
#Find middle time
middletimes = []
for i in range(int(len(times)/combine_number)):
    numberN = int(i*combine_number)
    numberP = int((i*combine_number) + combine_number)
    # use a name that does not shadow the imported time module
    time_group = times[numberN:numberP]
    middletimes.append(time_group[int(len(time_group)/2)])
print(middletimes)
#Use plot() method to graph particle count vs. time and add legend
plt.figure(figsize = [5,5])
# x axis: the middle timestamp of each combined group (middletimes, computed above)
plt.plot(middletimes, new_P3, "b.", label='P3')
plt.plot(middletimes, new_P5, "g.", label = 'P5')
plt.plot(middletimes, new_P10, "r.", label = 'P10')
plt.plot(middletimes, new_P25, "m.", label = 'P25')
plt.plot(middletimes, new_P50, "y.", label = 'P50')
plt.plot(middletimes, new_P100, "c.", label = 'P100')
plt.legend(loc="best", title = "Key")
plt.xlabel("Time")
plt.ylabel("Particle Count")
file_title = "Air Quality Test Results: From "+datetime.datetime.strftime(times[0], "%Y-%m-%d %H:%M:%S")+" To "+datetime.datetime.strftime(times[-1], "%Y-%m-%d %H:%M:%S")
plt.title("Particle Count vs. Time")
wtitle = pyl.gcf()
wtitle.canvas.set_window_title(file_title)
#Use plot() method to graph particle concentration vs. time and add legend
plt.figure(figsize = [5,5])
plt.plot(middletimes, new_Val10, "b.", label='1.0')
plt.plot(middletimes, new_Val25, "g.", label = '2.5')
plt.plot(middletimes, new_Val100, "r.", label = '10')
plt.legend(loc="best", title = "Key")
plt.xlabel("Time")
plt.ylabel("Particle Concentration")
file_title = "Air Quality Test Results: From "+datetime.datetime.strftime(times[0], "%Y-%m-%d %H:%M:%S")+" To "+datetime.datetime.strftime(times[-1], "%Y-%m-%d %H:%M:%S")
plt.title("Particle Concentration vs. Time")
wtitle = pyl.gcf()
wtitle.canvas.set_window_title(file_title)
plt.show()
| mit |
liyu1990/sklearn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
sevas/csxj-crawler | scripts/make_figures.py | 1 | 7756 | #from __future__ import division
#
#import os, os.path
#import json
#import itertools
#from collections import namedtuple
#import matplotlib.pyplot as plt
#import numpy as np
#
#from csxj.datasources.parser_tools.article import ArticleData
#
#
#
#BACKGROUND_COLOR = '#e3e1dd'
#LIGHT_COLOR = '#f0efed'
#DARK_COLOR = '#4c4c4c'
#
#
#def get_subdirs(parent_path):
# return [d for d in os.listdir(parent_path) if os.path.isdir(os.path.join(parent_path, d))]
#
#
#
#def get_articles(json_filepath):
# with open(json_filepath, 'r') as f:
# json_content = json.load(f)
# articles_string = json_content['articles']
# return [ArticleData.from_json(article_s) for article_s in articles_string]
#
#
#
#def get_flat_article_list(provider_path):
# all_days = get_subdirs(provider_path)
# all_articles = []
#
#
# for day in all_days:
# day_path = os.path.join(provider_path, day)
# all_batches = get_subdirs(day_path)
#
# for batch in all_batches:
# batch_path = os.path.join(day_path, batch)
# all_articles.extend(get_articles(os.path.join(batch_path, 'articles.json')))
#
# return all_articles
#
#
#def categorize_articles(articles):
# def keyfunc(article):
# return article.category
#
# groups = []
# uniquekeys = []
# data = sorted(articles, key=keyfunc)
# for k, g in itertools.groupby(data, keyfunc):
# groups.append(list(g)) # Store group iterator as a list
# uniquekeys.append(k)
#
# return zip(uniquekeys, groups)
#
#
#
#def count_links(articles):
# ext_links = sum([len(art.external_links) for art in articles])
# int_links = sum([len(art.internal_links) for art in articles])
#
# return ext_links, int_links
#
#
#
#CategoryCounters = namedtuple('CategoryCounters', 'name total_links total_articles link_article_ratio')
#
#
#def make_barchart_in_subplot(ax, xs, title, labels):
# ind = np.arange(len(xs))
# ind = np.arange(len(xs))
# ax.barh(ind, xs, color=LIGHT_COLOR)
# ax.set_yticklabels(ind+0.35, labels, fontsize='small')
# ax.set_title(title)
#
#
#def make_barchart(xs, title, labels):
# ind = np.arange(len(xs))
# plt.barh(ind, xs, color=LIGHT_COLOR)
# plt.yticks(ind+0.35, labels, fontsize='small')
# plt.title(title)
#
#
#
#def sort_categories_by_links_article_ratio(categorized_articles):
# link_counters = list()
#
# max_total_articles = len(max(categorized_articles, key=lambda a: len(a[1]))[1])
#
# for (group, articles) in categorized_articles:
# total_articles = len(articles)
#
# total_links = sum(count_links(articles))
# if total_links and max_total_articles:
# ratio = (total_articles / total_links) / max_total_articles
# link_counters.append(CategoryCounters(name=group,
# total_links=total_links,
# total_articles=total_articles,
# link_article_ratio=ratio))
#
# def keyfunc(counter):
# return counter.link_article_ratio
# link_counters.sort(key=keyfunc)
#
# return link_counters
#
#
#def plot_categories_by_links_article_ratio_in_subplot(ax, categorized_articles, source_name):
# link_counters = sort_categories_by_links_article_ratio(categorized_articles)
#
# x = np.array([c.link_article_ratio for c in link_counters])
#
# def make_label(counter):
# return u'{0} (n_a={1} n_l={2})'.format(u'/'.join(counter.name),
# counter.total_articles,
# counter.total_links)
#
#
# labels = [make_label(c) for c in link_counters]
# make_barchart_in_subplot(ax, x, source_name, labels)
#
#
#def plot_categories_by_links_article_ratio(name, categorized_articles, outdir):
# link_counters = sort_categories_by_links_article_ratio(categorized_articles)
#
# x = np.array([c.link_article_ratio for c in link_counters])
#
# def make_label(counter):
# return u'{0} (n_a={1} n_l={2})'.format(u'/'.join(counter.name),
# counter.total_articles,
# counter.total_links)
#
# plt.clf()
# labels = [make_label(c) for c in link_counters]
# make_barchart(x, 'Categories by article/links ratio ({0})'.format(name), labels)
# plt.savefig(os.path.join(outdir, name+'_article_link_ratio.png'), bbox_inches='tight')
#
#
#
#def plot_categories_by_number_of_articles(name, categorized_articles, outdir):
# article_counters = list()
# for (group, articles) in categorized_articles:
# article_counters.append((group, len(articles)))
#
# def keyfunc(counter):
# return counter[1]
# article_counters.sort(key=keyfunc)
#
# x = np.array([counter[1] for counter in article_counters])
#
# def make_label(article_counter):
# return u'{0}'.format(u'/'.join(article_counter[0]))
#
# plt.clf()
# labels = [make_label(c) for c in article_counters]
# make_barchart(x, '# Articles per category ({0})'.format(name), labels)
# plt.savefig(os.path.join(outdir, name+'_articles_by_category.png'), bbox_inches='tight')
#
#
#
#def plot_categories_by_number_of_links(name, categorized_articles, outdir):
#
# LinkCounter = namedtuple('LinkCounter', 'name total_ext_links total_int_links total_links')
#
# link_counters = list()
# for (group, articles) in categorized_articles:
# n_ext_links, n_int_links = count_links(articles)
# link_counters.append(LinkCounter(name=group,
# total_ext_links=n_ext_links,
# total_int_links=n_int_links,
# total_links=n_ext_links+n_int_links))
#
# def keyfunc(counter):
# return counter.total_links
# link_counters.sort(key=keyfunc)
#
#
# x1 = np.array([counter.total_ext_links for counter in link_counters])
# x2 = np.array([counter.total_int_links for counter in link_counters])
#
# def make_label(link_counter):
# return u'{0}'.format(u'/'.join(link_counter.name))
# labels = [make_label(c) for c in link_counters]
#
# plt.clf()
# plt.autumn()
# ind = np.arange(len(x1))
# p1 = plt.barh(ind, x1, color=DARK_COLOR)
# p2 = plt.barh(ind, x2, left=x1, color=LIGHT_COLOR)
# plt.yticks(ind+0.35, labels, fontsize='small')
# plt.title('Number of links per category ({0})'.format(name))
# plt.legend( (p1[0], p2[0]), ('External links', 'Internal links'), 'lower right' )
# plt.savefig(os.path.join(outdir, name+'_number_of_links.png'), bbox_inches='tight')
#
#
#
#def make_all_figures(db_root, outdir):
# if not os.path.exists(outdir):
# os.mkdir(outdir)
#
# for source_dir in get_subdirs(db_root):
# articles = get_flat_article_list(os.path.join(db_root, source_dir))
#
# categorized_articles = categorize_articles(articles)
#
# plot_categories_by_links_article_ratio(source_dir, categorized_articles, outdir)
# plot_categories_by_number_of_articles(source_dir, categorized_articles, outdir)
# plot_categories_by_number_of_links(source_dir, categorized_articles, outdir)
#
#
#if __name__=='__main__':
# import argparse
#
# parser = argparse.ArgumentParser(description='Make (hopefully) interesting figures from the json db')
# parser.add_argument('--dir', type=str, dest='input_dir', required=True, help='json db directory')
# parser.add_argument('--outdir', type=str, dest='output_dir', required=True, help='directory to dump the figures in')
# args = parser.parse_args()
# make_all_figures(args.input_dir, args.output_dir)
#
#
#
#
| mit |
trachelr/mne-python | mne/decoding/time_gen.py | 2 | 49656 | # Authors: Jean-Remi King <[email protected]>
# Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Clement Moutard <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import copy
from ..io.pick import pick_types
from ..viz.decoding import plot_gat_matrix, plot_gat_times
from ..parallel import parallel_func, check_n_jobs
class _DecodingTime(dict):
"""A dictionary to configure the training times that has the following keys:
'slices' : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
'start' : float
Time at which to start decoding (in seconds).
Defaults to min(epochs.times).
'stop' : float
Maximal time at which to stop decoding (in seconds).
Defaults to max(times).
'step' : float
Duration separating the start of subsequent classifiers (in
seconds). Defaults to one time sample.
'length' : float
Duration of each classifier (in seconds). Defaults to one time sample.
If None, empty dict. """
def __repr__(self):
s = ""
if "start" in self:
s += "start: %0.3f (s)" % (self["start"])
if "stop" in self:
s += ", stop: %0.3f (s)" % (self["stop"])
if "step" in self:
s += ", step: %0.3f (s)" % (self["step"])
if "length" in self:
s += ", length: %0.3f (s)" % (self["length"])
if "slices" in self:
# identify depth: training times only contains n_time but
# testing_times can contain n_times or n_times * m_times
depth = [len(ii) for ii in self["slices"]]
if len(np.unique(depth)) == 1: # if all slices have same depth
if depth[0] == 1: # if depth is one
s += ", n_time_windows: %s" % (len(depth))
else:
s += ", n_time_windows: %s x %s" % (len(depth), depth[0])
else:
                s += (", n_time_windows: %s x [%s, %s]"
                      % (len(depth), min(depth), max(depth)))
return "<DecodingTime | %s>" % s
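# Illustrative only (not taken from the MNE docs): a train_times configuration
# such as dict(start=0., stop=0.4, step=0.01, length=0.05) requests 50 ms
# classifiers trained every 10 ms between 0 and 400 ms.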
class _GeneralizationAcrossTime(object):
""" see GeneralizationAcrossTime
""" # noqa
def __init__(self, picks=None, cv=5, clf=None, train_times=None,
test_times=None, predict_mode='cross-validation', scorer=None,
n_jobs=1):
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
# Store parameters in object
self.cv = cv
# Define training sliding window
self.train_times = (_DecodingTime() if train_times is None
else _DecodingTime(train_times))
# Define testing sliding window. If None, will be set in predict()
if test_times is None:
self.test_times = _DecodingTime()
elif test_times == 'diagonal':
self.test_times = 'diagonal'
else:
self.test_times = _DecodingTime(test_times)
# Default classification pipeline
if clf is None:
scaler = StandardScaler()
estimator = LogisticRegression()
clf = Pipeline([('scaler', scaler), ('estimator', estimator)])
self.clf = clf
self.predict_mode = predict_mode
self.scorer = scorer
self.picks = picks
self.n_jobs = n_jobs
def fit(self, epochs, y=None):
""" Train a classifier on each specified time slice.
Note. This function sets the ``picks_``, ``ch_names``, ``cv_``,
        ``y_train_``, ``train_times_`` and ``estimators_`` attributes.
Parameters
----------
epochs : instance of Epochs
The epochs.
y : list or ndarray of int, shape (n_samples,) or None, optional
To-be-fitted model values. If None, y = epochs.events[:, 2].
Returns
-------
self : GeneralizationAcrossTime
Returns fitted GeneralizationAcrossTime object.
Notes
------
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
from sklearn.base import clone
from sklearn.cross_validation import check_cv, StratifiedKFold
# clean attributes
for att in ['picks_', 'ch_names', 'y_train_', 'cv_', 'train_times_',
'estimators_', 'test_times_', 'y_pred_', 'y_true_',
'scores_', 'scorer_']:
if hasattr(self, att):
delattr(self, att)
n_jobs = self.n_jobs
# Extract data from MNE structure
X, y, self.picks_ = _check_epochs_input(epochs, y, self.picks)
self.ch_names = [epochs.ch_names[p] for p in self.picks_]
cv = self.cv
if isinstance(cv, (int, np.int)):
cv = StratifiedKFold(y, cv)
cv = check_cv(cv, X, y, classifier=True)
self.cv_ = cv # update CV
self.y_train_ = y
# Cross validation scheme
# XXX Cross validation should later be transformed into a make_cv, and
# defined in __init__
self.train_times_ = copy.deepcopy(self.train_times)
if 'slices' not in self.train_times_:
self.train_times_ = _sliding_window(epochs.times, self.train_times)
# Parallel across training time
parallel, p_time_gen, n_jobs = parallel_func(_fit_slices, n_jobs)
n_chunks = min(X.shape[2], n_jobs)
splits = np.array_split(self.train_times_['slices'], n_chunks)
def f(x):
return np.unique(np.concatenate(x))
out = parallel(p_time_gen(clone(self.clf),
X[..., f(train_slices_chunk)],
y, train_slices_chunk, cv)
for train_slices_chunk in splits)
# Unpack estimators into time slices X folds list of lists.
self.estimators_ = sum(out, list())
return self
def predict(self, epochs):
""" Test each classifier on each specified testing time slice.
.. note:: This function sets the ``y_pred_`` and ``test_times_``
attributes.
Parameters
----------
epochs : instance of Epochs
The epochs. Can be similar to fitted epochs or not. See
predict_mode parameter.
Returns
-------
y_pred : list of lists of arrays of floats, shape (n_train_t, n_test_t, n_epochs, n_prediction_dims)
The single-trial predictions at each training time and each testing
time. Note that the number of testing times per training time need
not be regular; else
``np.shape(y_pred_) = (n_train_time, n_test_time, n_epochs)``.
""" # noqa
# Check that at least one classifier has been trained
if not hasattr(self, 'estimators_'):
raise RuntimeError('Please fit models before trying to predict')
# clean attributes
for att in ['y_pred_', 'test_times_', 'scores_', 'scorer_', 'y_true_']:
if hasattr(self, att):
delattr(self, att)
n_jobs = self.n_jobs
X, y, _ = _check_epochs_input(epochs, None, self.picks_)
# Define testing sliding window
if self.test_times == 'diagonal':
test_times = _DecodingTime()
test_times['slices'] = [[s] for s in self.train_times_['slices']]
test_times['times'] = [[s] for s in self.train_times_['times']]
elif isinstance(self.test_times, dict):
test_times = copy.deepcopy(self.test_times)
else:
raise ValueError('`test_times` must be a dict or "diagonal"')
if 'slices' not in test_times:
            # Check that the testing windows use the same number of time
            # samples as the training windows (otherwise the number of
            # features won't match)
if 'length' not in test_times:
test_times['length'] = self.train_times_['length']
if test_times['length'] != self.train_times_['length']:
raise ValueError('`train_times` and `test_times` must have '
'identical `length` keys')
# Make a sliding window for each training time.
slices_list = list()
times_list = list()
for t in range(0, len(self.train_times_['slices'])):
test_times_ = _sliding_window(epochs.times, test_times)
times_list += [test_times_['times']]
slices_list += [test_times_['slices']]
test_times = test_times_
test_times['slices'] = slices_list
test_times['times'] = times_list
# Store all testing times parameters
self.test_times_ = test_times
# Prepare parallel predictions
parallel, p_time_gen, _ = parallel_func(_predict_time_loop, n_jobs)
# Loop across estimators (i.e. training times)
self.y_pred_ = parallel(p_time_gen(X, self.estimators_[t_train],
self.cv_, slices, self.predict_mode)
for t_train, slices in
enumerate(self.test_times_['slices']))
return self.y_pred_
def score(self, epochs=None, y=None):
"""Score Epochs
Estimate scores across trials by comparing the prediction estimated for
each trial to its true value.
Calls ``predict()`` if it has not been already.
Note. The function updates the ``scorer_``, ``scores_``, and
``y_true_`` attributes.
Parameters
----------
epochs : instance of Epochs | None, optional
The epochs. Can be similar to fitted epochs or not.
If None, it needs to rely on the predictions ``y_pred_``
generated with ``predict()``.
y : list | ndarray, shape (n_epochs,) | None, optional
True values to be compared with the predictions ``y_pred_``
generated with ``predict()`` via ``scorer_``.
If None and ``predict_mode``=='cross-validation' y = ``y_train_``.
Returns
-------
scores : list of lists of float
The scores estimated by ``scorer_`` at each training time and each
testing time (e.g. mean accuracy of ``predict(X)``). Note that the
number of testing times per training time need not be regular;
else, np.shape(scores) = (n_train_time, n_test_time).
"""
from sklearn.metrics import accuracy_score
# Run predictions if not already done
if epochs is not None:
self.predict(epochs)
else:
if not hasattr(self, 'y_pred_'):
raise RuntimeError('Please predict() epochs first or pass '
'epochs to score()')
# clean gat.score() attributes
for att in ['scores_', 'scorer_', 'y_true_']:
if hasattr(self, att):
delattr(self, att)
# Check scorer
# XXX Need API to identify proper scorer from the clf
self.scorer_ = accuracy_score if self.scorer is None else self.scorer
# If no regressor is passed, use default epochs events
if y is None:
if self.predict_mode == 'cross-validation':
y = self.y_train_
else:
if epochs is not None:
y = epochs.events[:, 2]
else:
raise RuntimeError('y is undefined because '
'predict_mode="mean-prediction" and '
'epochs are missing. You need to '
'explicitly specify y.')
if not np.all(np.unique(y) == np.unique(self.y_train_)):
raise ValueError('Classes (y) passed differ from classes used '
'for training. Please explicitly pass your y '
'for scoring.')
elif isinstance(y, list):
y = np.array(y)
self.y_true_ = y # to be compared with y_pred for scoring
# Preprocessing for parallelization:
n_jobs = min(len(self.y_pred_[0][0]), check_n_jobs(self.n_jobs))
parallel, p_time_gen, n_jobs = parallel_func(_score_loop, n_jobs)
# Score each training and testing time point
scores = parallel(p_time_gen(self.y_true_, self.y_pred_[t_train],
slices, self.scorer_)
for t_train, slices
in enumerate(self.test_times_['slices']))
self.scores_ = scores
return scores
def _predict_time_loop(X, estimators, cv, slices, predict_mode):
"""Aux function of GeneralizationAcrossTime
Run classifiers predictions loop across time samples.
Parameters
----------
X : ndarray, shape (n_epochs, n_features, n_times)
To-be-fitted data.
estimators : array-like, shape (n_times, n_folds)
Array of scikit-learn classifiers fitted in cross-validation.
slices : list
        List of slices selecting the data in X from which the predictions
        are generated.
predict_mode : {'cross-validation', 'mean-prediction'}
Indicates how predictions are achieved with regards to the cross-
validation procedure:
'cross-validation' : estimates a single prediction per sample based
on the unique independent classifier fitted in the cross-
validation.
'mean-prediction' : estimates k predictions per sample, based on
each of the k-fold cross-validation classifiers, and average
these predictions into a single estimate per sample.
Default: 'cross-validation'
"""
n_epochs = len(X)
# Loop across testing slices
y_pred = [list() for _ in range(len(slices))]
# XXX EHN: This loop should be parallelized in a similar way to fit()
for t, indices in enumerate(slices):
# Flatten features in case of multiple time samples
Xtrain = X[:, :, indices].reshape(
n_epochs, np.prod(X[:, :, indices].shape[1:]))
# Single trial predictions
if predict_mode == 'cross-validation':
# If predict within cross validation, only predict with
# corresponding classifier, else predict with each fold's
# classifier and average prediction.
# Check that training cv and predicting cv match
if (len(estimators) != cv.n_folds) or (cv.n != Xtrain.shape[0]):
raise ValueError(
'When `predict_mode = "cross-validation"`, the training '
'and predicting cv schemes must be identical.')
for k, (train, test) in enumerate(cv):
                # XXX I didn't manage to initialize this array correctly, as
                # its size depends on the type of predictor and the
                # number of classes.
if k == 0:
y_pred_ = _predict(Xtrain[test, :], estimators[k:k + 1])
y_pred[t] = np.empty((n_epochs, y_pred_.shape[1]))
y_pred[t][test, :] = y_pred_
y_pred[t][test, :] = _predict(Xtrain[test, :],
estimators[k:k + 1])
elif predict_mode == 'mean-prediction':
y_pred[t] = _predict(Xtrain, estimators)
else:
raise ValueError('`predict_mode` must be a str, "mean-prediction"'
' or "cross-validation"')
return y_pred
def _score_loop(y_true, y_pred, slices, scorer):
n_time = len(slices)
# Loop across testing times
scores = [0] * n_time
for t, indices in enumerate(slices):
# Scores across trials
scores[t] = scorer(y_true, y_pred[t])
return scores
def _check_epochs_input(epochs, y, picks=None):
"""Aux function of GeneralizationAcrossTime
Format MNE data into scikit-learn X and y
Parameters
----------
epochs : instance of Epochs
The epochs.
y : ndarray shape (n_epochs) | list shape (n_epochs) | None
        To-be-fitted model. If y is None, y = epochs.events[:, 2].
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
Returns
-------
X : ndarray, shape (n_epochs, n_selected_chans, n_times)
To-be-fitted data.
y : ndarray, shape (n_epochs,)
To-be-fitted model.
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
"""
if y is None:
y = epochs.events[:, 2]
elif isinstance(y, list):
y = np.array(y)
# Convert MNE data into trials x features x time matrix
X = epochs.get_data()
# Pick channels
if picks is None: # just use good data channels
picks = pick_types(epochs.info, meg=True, eeg=True, seeg=True,
eog=False, ecg=False, misc=False, stim=False,
ref_meg=False, exclude='bads')
if isinstance(picks, (list, np.ndarray)):
picks = np.array(picks, dtype=np.int)
else:
raise ValueError('picks must be a list or a numpy.ndarray of int')
X = X[:, picks, :]
# Check data sets
assert X.shape[0] == y.shape[0]
return X, y, picks
def _fit_slices(clf, x_chunk, y, slices, cv):
"""Aux function of GeneralizationAcrossTime
Fit each classifier.
Parameters
----------
clf : scikit-learn classifier
The classifier object.
x_chunk : ndarray, shape (n_epochs, n_features, n_times)
To-be-fitted data.
y : list | array, shape (n_epochs,)
To-be-fitted model.
slices : list | array, shape (n_training_slice,)
List of training slices, indicating time sample relative to X
cv : scikit-learn cross-validation generator
A cross-validation generator to use.
Returns
-------
estimators : list of lists of estimators
List of fitted scikit-learn classifiers corresponding to each training
slice.
"""
from sklearn.base import clone
# Initialize
n_epochs = len(x_chunk)
estimators = list()
    # Identify the time samples of x_chunk corresponding to X
values = np.unique(np.concatenate(slices))
indices = range(len(values))
# Loop across time slices
for t_slice in slices:
# Translate absolute time samples into time sample relative to x_chunk
for ii in indices:
t_slice[t_slice == values[ii]] = indices[ii]
# Select slice
X = x_chunk[..., t_slice]
# Reshape data matrix to flatten features in case of multiple time
# samples.
X = X.reshape(n_epochs, np.prod(X.shape[1:]))
# Loop across folds
estimators_ = list()
for fold, (train, test) in enumerate(cv):
# Fit classifier
clf_ = clone(clf)
clf_.fit(X[train, :], y[train])
estimators_.append(clf_)
# Store classifier
estimators.append(estimators_)
return estimators
def _sliding_window(times, window_params):
"""Aux function of GeneralizationAcrossTime
Define the slices on which to train each classifier.
Parameters
----------
times : ndarray, shape (n_times,)
Array of times from MNE epochs.
window_params : dict keys: ('start', 'stop', 'step', 'length')
Either train or test times. See GAT documentation.
Returns
-------
time_pick : list
List of training slices, indicating for each classifier the time
sample (in indices of times) to be fitted on.
"""
window_params = _DecodingTime(window_params)
    # Time step between samples (in seconds)
freq = (times[-1] - times[0]) / len(times)
# Default values
if ('slices' in window_params and
all(k in window_params for k in
('start', 'stop', 'step', 'length'))):
time_pick = window_params['slices']
else:
if 'start' not in window_params:
window_params['start'] = times[0]
if 'stop' not in window_params:
window_params['stop'] = times[-1]
if 'step' not in window_params:
window_params['step'] = freq
if 'length' not in window_params:
window_params['length'] = freq
if (window_params['start'] < times[0] or
window_params['start'] > times[-1]):
raise ValueError(
'`start` (%.2f s) outside time range [%.2f, %.2f].' % (
window_params['start'], times[0], times[-1]))
if (window_params['stop'] < times[0] or
window_params['stop'] > times[-1]):
raise ValueError(
'`stop` (%.2f s) outside time range [%.2f, %.2f].' % (
window_params['stop'], times[0], times[-1]))
if window_params['step'] < freq:
raise ValueError('`step` must be >= 1 / sampling_frequency')
if window_params['length'] < freq:
raise ValueError('`length` must be >= 1 / sampling_frequency')
if window_params['length'] > np.ptp(times):
raise ValueError('`length` must be <= time range')
# Convert seconds to index
def find_time_idx(t): # find closest time point
return np.argmin(np.abs(np.asarray(times) - t))
start = find_time_idx(window_params['start'])
stop = find_time_idx(window_params['stop'])
step = int(round(window_params['step'] / freq))
length = int(round(window_params['length'] / freq))
# For each training slice, give time samples to be included
time_pick = [range(start, start + length)]
while (time_pick[-1][0] + step) <= (stop - length + 1):
start = time_pick[-1][0] + step
time_pick.append(range(start, start + length))
window_params['slices'] = time_pick
    # Keep the last time sample of each training window (in seconds)
t_inds_ = [t[-1] for t in window_params['slices']]
window_params['times'] = times[t_inds_]
return window_params
def _predict(X, estimators):
"""Aux function of GeneralizationAcrossTime
    Predict with each classifier. If multiple classifiers are passed, their
    predictions are combined into a single prediction per trial (majority
    vote for classifiers, mean otherwise).
Parameters
----------
estimators : ndarray, shape (n_folds,) | shape (1,)
Array of scikit-learn classifiers to predict data.
X : ndarray, shape (n_epochs, n_features, n_times)
To-be-predicted data
Returns
-------
y_pred : ndarray, shape (n_epochs, m_prediction_dimensions)
Classifier's prediction for each trial.
"""
from scipy import stats
from sklearn.base import is_classifier
# Initialize results:
n_epochs = X.shape[0]
n_clf = len(estimators)
# Compute prediction for each sub-estimator (i.e. per fold)
# if independent, estimators = all folds
for fold, clf in enumerate(estimators):
_y_pred = clf.predict(X)
# initialize predict_results array
if fold == 0:
predict_size = _y_pred.shape[1] if _y_pred.ndim > 1 else 1
y_pred = np.ones((n_epochs, predict_size, n_clf))
if predict_size == 1:
y_pred[:, 0, fold] = _y_pred
else:
y_pred[:, :, fold] = _y_pred
# Collapse y_pred across folds if necessary (i.e. if independent)
if fold > 0:
# XXX need API to identify how multiple predictions can be combined?
if is_classifier(clf):
y_pred, _ = stats.mode(y_pred, axis=2)
else:
y_pred = np.mean(y_pred, axis=2)
# Format shape
y_pred = y_pred.reshape((n_epochs, predict_size))
return y_pred
class GeneralizationAcrossTime(_GeneralizationAcrossTime):
"""Generalize across time and conditions
    Creates an estimator object used to 1) fit a series of classifiers on
multidimensional time-resolved data, and 2) test the ability of each
classifier to generalize across other time samples.
Parameters
----------
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
cv : int | object
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
scikit-learn.cross_validation module for the list of possible objects.
Defaults to 5.
clf : object | None
An estimator compliant with the scikit-learn API (fit & predict).
If None the classifier will be a standard pipeline including
StandardScaler and LogisticRegression with default parameters.
train_times : dict | None
A dictionary to configure the training times:
``slices`` : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
``start`` : float
Time at which to start decoding (in seconds).
Defaults to min(epochs.times).
``stop`` : float
Maximal time at which to stop decoding (in seconds).
Defaults to max(times).
``step`` : float
Duration separating the start of subsequent classifiers (in
seconds). Defaults to one time sample.
``length`` : float
Duration of each classifier (in seconds).
Defaults to one time sample.
If None, empty dict.
test_times : 'diagonal' | dict | None, optional
Configures the testing times.
If set to 'diagonal', predictions are made at the time at which
each classifier is trained.
If set to None, predictions are made at all time points.
    If set to dict, the dict should contain ``slices`` or be constructed in
a similar way to train_times::
``slices`` : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
If None, empty dict.
predict_mode : {'cross-validation', 'mean-prediction'}
Indicates how predictions are achieved with regards to the cross-
validation procedure:
``cross-validation`` : estimates a single prediction per sample
based on the unique independent classifier fitted in the
cross-validation.
``mean-prediction`` : estimates k predictions per sample, based on
each of the k-fold cross-validation classifiers, and average
these predictions into a single estimate per sample.
Default: 'cross-validation'
scorer : object | None
scikit-learn Scorer instance. If None, set to accuracy_score.
n_jobs : int
Number of jobs to run in parallel. Defaults to 1.
Attributes
----------
picks_ : array-like of int | None
The channels indices to include.
ch_names : list, array-like, shape (n_channels,)
Names of the channels used for training.
y_train_ : list | ndarray, shape (n_samples,)
The categories used for training.
train_times_ : dict
A dictionary that configures the training times:
``slices`` : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
``times`` : ndarray, shape (n_clfs,)
The training times (in seconds).
test_times_ : dict
A dictionary that configures the testing times for each training time:
``slices`` : ndarray, shape (n_clfs, n_testing_times)
Array of time slices (in indices) used for each classifier.
``times`` : ndarray, shape (n_clfs, n_testing_times)
The testing times (in seconds) for each training time.
cv_ : CrossValidation object
The actual CrossValidation input depending on y.
estimators_ : list of list of scikit-learn.base.BaseEstimator subclasses.
The estimators for each time point and each fold.
y_pred_ : list of lists of arrays of floats, shape (n_train_times, n_test_times, n_epochs, n_prediction_dims)
The single-trial predictions estimated by self.predict() at each
training time and each testing time. Note that the number of testing
times per training time need not be regular, else
``np.shape(y_pred_) = (n_train_time, n_test_time, n_epochs).``
y_true_ : list | ndarray, shape (n_samples,)
The categories used for scoring ``y_pred_``.
scorer_ : object
scikit-learn Scorer instance.
scores_ : list of lists of float
The scores estimated by ``self.scorer_`` at each training time and each
testing time (e.g. mean accuracy of self.predict(X)). Note that the
number of testing times per training time need not be regular;
else, ``np.shape(scores) = (n_train_time, n_test_time)``.
See Also
--------
TimeDecoding
Notes
-----
The function implements the method used in:
Jean-Remi King, Alexandre Gramfort, Aaron Schurger, Lionel Naccache
and Stanislas Dehaene, "Two distinct dynamic modes subtend the
detection of unexpected sounds", PLoS ONE, 2014
DOI: 10.1371/journal.pone.0085791
.. versionadded:: 0.9.0
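    Examples
    --------
    Minimal usage sketch (illustrative only; ``epochs`` is assumed to be an
    existing mne.Epochs instance whose events encode the conditions to
    decode)::
        >>> from mne.decoding import GeneralizationAcrossTime  # doctest: +SKIP
        >>> gat = GeneralizationAcrossTime(cv=5, n_jobs=1)  # doctest: +SKIP
        >>> gat.fit(epochs)  # doctest: +SKIP
        >>> scores = gat.score(epochs)  # doctest: +SKIP
        >>> gat.plot()  # doctest: +SKIP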
""" # noqa
def __init__(self, picks=None, cv=5, clf=None, train_times=None,
test_times=None, predict_mode='cross-validation', scorer=None,
n_jobs=1):
super(GeneralizationAcrossTime, self).__init__(
picks=picks, cv=cv, clf=clf, train_times=train_times,
test_times=test_times, predict_mode=predict_mode, scorer=scorer,
n_jobs=n_jobs)
def __repr__(self):
s = ''
if hasattr(self, "estimators_"):
s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
self.train_times_['start'], self.train_times_['stop'])
else:
s += 'no fit'
if hasattr(self, 'y_pred_'):
s += (", predicted %d epochs" % len(self.y_pred_[0][0]))
else:
s += ", no prediction"
if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
s += ',\n '
else:
s += ', '
if hasattr(self, 'scores_'):
s += "scored"
if callable(self.scorer_):
s += " (%s)" % (self.scorer_.__name__)
else:
s += "no score"
return "<GAT | %s>" % s
def plot(self, title=None, vmin=None, vmax=None, tlim=None, ax=None,
cmap='RdBu_r', show=True, colorbar=True,
xlabel=True, ylabel=True):
"""Plotting function of GeneralizationAcrossTime object
Plot the score of each classifier at each tested time window.
Parameters
----------
title : str | None
Figure title.
vmin : float | None
Min color value for scores. If None, sets to min(gat.scores_).
vmax : float | None
Max color value for scores. If None, sets to max(gat.scores_).
tlim : ndarray, (train_min, test_max) | None
The time limits used for plotting.
ax : object | None
Plot pointer. If None, generate new figure.
cmap : str | cmap object
The color map to be used. Defaults to 'RdBu_r'.
show : bool
If True, the figure will be shown. Defaults to True.
colorbar : bool
If True, the colorbar of the figure is displayed. Defaults to True.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
return plot_gat_matrix(self, title=title, vmin=vmin, vmax=vmax,
tlim=tlim, ax=ax, cmap=cmap, show=show,
colorbar=colorbar, xlabel=xlabel, ylabel=ylabel)
def plot_diagonal(self, title=None, xmin=None, xmax=None, ymin=None,
ymax=None, ax=None, show=True, color=None,
xlabel=True, ylabel=True, legend=True, chance=True,
label='Classif. score'):
"""Plotting function of GeneralizationAcrossTime object
Plot each classifier score trained and tested at identical time
windows.
Parameters
----------
title : str | None
Figure title.
xmin : float | None, optional
Min time value.
xmax : float | None, optional
Max time value.
ymin : float | None, optional
Min score value. If None, sets to min(scores).
ymax : float | None, optional
Max score value. If None, sets to max(scores).
ax : object | None
            Instance of matplotlib.axes.Axes. If None, generate new figure.
show : bool
If True, the figure will be shown. Defaults to True.
color : str
Score line color.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
legend : bool
If True, a legend is displayed. Defaults to True.
        chance : bool | float. Defaults to True
Plot chance level. If True, chance level is estimated from the type
of scorer.
label : str
Score label used in the legend. Defaults to 'Classif. score'.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
return plot_gat_times(self, train_time='diagonal', title=title,
xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax, ax=ax, show=show,
color=color, xlabel=xlabel, ylabel=ylabel,
legend=legend, chance=chance, label=label)
def plot_times(self, train_time, title=None, xmin=None, xmax=None,
ymin=None, ymax=None, ax=None, show=True, color=None,
xlabel=True, ylabel=True, legend=True, chance=True,
label='Classif. score'):
"""Plotting function of GeneralizationAcrossTime object
Plot the scores of the classifier trained at specific training time(s).
Parameters
----------
train_time : float | list or array of float
Plots scores of the classifier trained at train_time.
title : str | None
Figure title.
xmin : float | None, optional
Min time value.
xmax : float | None, optional
Max time value.
ymin : float | None, optional
Min score value. If None, sets to min(scores).
ymax : float | None, optional
Max score value. If None, sets to max(scores).
ax : object | None
            Instance of matplotlib.axes.Axes. If None, generate new figure.
show : bool
If True, the figure will be shown. Defaults to True.
color : str or list of str
Score line color(s).
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
legend : bool
If True, a legend is displayed. Defaults to True.
chance : bool | float.
Plot chance level. If True, chance level is estimated from the type
of scorer.
label : str
Score label used in the legend. Defaults to 'Classif. score'.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
if (not isinstance(train_time, float) and
not (isinstance(train_time, (list, np.ndarray)) and
np.all([isinstance(time, float) for time in train_time]))):
raise ValueError('train_time must be float | list or array of '
'floats. Got %s.' % type(train_time))
return plot_gat_times(self, train_time=train_time, title=title,
xmin=xmin, xmax=xmax,
ymin=ymin, ymax=ymax, ax=ax, show=show,
color=color, xlabel=xlabel, ylabel=ylabel,
legend=legend, chance=chance, label=label)
class TimeDecoding(_GeneralizationAcrossTime):
"""Train and test a series of classifiers at each time point to obtain a
score across time.
Parameters
----------
picks : array-like of int | None
The channels indices to include. If None the data
channels in info, except bad channels, are used.
cv : int | object
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
scikit-learn.cross_validation module for the list of possible objects.
Defaults to 5.
clf : object | None
An estimator compliant with the scikit-learn API (fit & predict).
If None the classifier will be a standard pipeline including
StandardScaler and a Logistic Regression with default parameters.
times : dict | None
A dictionary to configure the training times:
``slices`` : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
``start`` : float
Time at which to start decoding (in seconds). By default,
min(epochs.times).
``stop`` : float
Maximal time at which to stop decoding (in seconds). By
default, max(times).
``step`` : float
Duration separating the start of subsequent classifiers (in
seconds). By default, equals one time sample.
``length`` : float
Duration of each classifier (in seconds). By default, equals
one time sample.
If None, empty dict.
predict_mode : {'cross-validation', 'mean-prediction'}
Indicates how predictions are achieved with regards to the cross-
validation procedure:
``cross-validation`` : estimates a single prediction per sample
based on the unique independent classifier fitted in the
cross-validation.
``mean-prediction`` : estimates k predictions per sample, based on
each of the k-fold cross-validation classifiers, and average
these predictions into a single estimate per sample.
Default: 'cross-validation'
scorer : object | None
scikit-learn Scorer instance. If None, set to accuracy_score.
n_jobs : int
Number of jobs to run in parallel. Defaults to 1.
Attributes
----------
picks_ : array-like of int | None
The channels indices to include.
ch_names : list, array-like, shape (n_channels,)
Names of the channels used for training.
y_train_ : ndarray, shape (n_samples,)
The categories used for training.
times_ : dict
A dictionary that configures the training times:
``slices`` : ndarray, shape (n_clfs,)
Array of time slices (in indices) used for each classifier.
If not given, computed from 'start', 'stop', 'length', 'step'.
``times`` : ndarray, shape (n_clfs,)
The training times (in seconds).
cv_ : CrossValidation object
The actual CrossValidation input depending on y.
estimators_ : list of list of scikit-learn.base.BaseEstimator subclasses.
The estimators for each time point and each fold.
y_pred_ : ndarray, shape (n_times, n_epochs, n_prediction_dims)
Class labels for samples in X.
y_true_ : list | ndarray, shape (n_samples,)
The categories used for scoring y_pred_.
scorer_ : object
scikit-learn Scorer instance.
scores_ : list of float, shape (n_times,)
The scores (mean accuracy of self.predict(X) wrt. y.).
See Also
--------
GeneralizationAcrossTime
Notes
-----
The function is equivalent to the diagonal of GeneralizationAcrossTime()
.. versionadded:: 0.10
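    Examples
    --------
    Minimal usage sketch (illustrative only; ``epochs`` is assumed to be an
    existing mne.Epochs instance with event-coded conditions)::
        >>> from mne.decoding import TimeDecoding  # doctest: +SKIP
        >>> td = TimeDecoding(cv=5, n_jobs=1)  # doctest: +SKIP
        >>> td.fit(epochs)  # doctest: +SKIP
        >>> scores = td.score(epochs)  # doctest: +SKIP
        >>> td.plot()  # doctest: +SKIP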
"""
def __init__(self, picks=None, cv=5, clf=None, times=None,
predict_mode='cross-validation', scorer=None, n_jobs=1):
        super(TimeDecoding, self).__init__(picks=picks, cv=cv, clf=clf,
train_times=times,
test_times='diagonal',
predict_mode=predict_mode,
scorer=scorer, n_jobs=n_jobs)
self._clean_times()
def __repr__(self):
s = ''
if hasattr(self, "estimators_"):
s += "fitted, start : %0.3f (s), stop : %0.3f (s)" % (
self.times_['start'], self.times_['stop'])
else:
s += 'no fit'
if hasattr(self, 'y_pred_'):
s += (", predicted %d epochs" % len(self.y_pred_[0]))
else:
s += ", no prediction"
if hasattr(self, "estimators_") and hasattr(self, 'scores_'):
s += ',\n '
else:
s += ', '
if hasattr(self, 'scores_'):
s += "scored"
if callable(self.scorer_):
s += " (%s)" % (self.scorer_.__name__)
else:
s += "no score"
return "<TimeDecoding | %s>" % s
def fit(self, epochs, y=None):
""" Train a classifier on each specified time slice.
Note. This function sets the ``picks_``, ``ch_names``, ``cv_``,
        ``y_train_``, ``train_times_`` and ``estimators_`` attributes.
Parameters
----------
epochs : instance of Epochs
The epochs.
y : list or ndarray of int, shape (n_samples,) or None, optional
To-be-fitted model values. If None, y = epochs.events[:, 2].
Returns
-------
self : TimeDecoding
Returns fitted TimeDecoding object.
Notes
------
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
self._prep_times()
super(TimeDecoding, self).fit(epochs, y=y)
self._clean_times()
return self
def predict(self, epochs):
""" Test each classifier on each specified testing time slice.
.. note:: This function sets the ``y_pred_`` and ``test_times_``
attributes.
Parameters
----------
epochs : instance of Epochs
The epochs. Can be similar to fitted epochs or not. See
predict_mode parameter.
Returns
-------
y_pred : list of lists of arrays of floats, shape (n_times, n_epochs, n_prediction_dims)
The single-trial predictions at each time sample.
""" # noqa
self._prep_times()
super(TimeDecoding, self).predict(epochs)
self._clean_times()
return self.y_pred_
def score(self, epochs=None, y=None):
"""Score Epochs
Estimate scores across trials by comparing the prediction estimated for
each trial to its true value.
Calls ``predict()`` if it has not been already.
Note. The function updates the ``scorer_``, ``scores_``, and
``y_true_`` attributes.
Parameters
----------
epochs : instance of Epochs | None, optional
The epochs. Can be similar to fitted epochs or not.
If None, it needs to rely on the predictions ``y_pred_``
generated with ``predict()``.
y : list | ndarray, shape (n_epochs,) | None, optional
True values to be compared with the predictions ``y_pred_``
generated with ``predict()`` via ``scorer_``.
If None and ``predict_mode``=='cross-validation' y = ``y_train_``.
Returns
-------
scores : list of float, shape (n_times,)
The scores estimated by ``scorer_`` at each time sample (e.g. mean
accuracy of ``predict(X)``).
"""
if epochs is not None:
self.predict(epochs)
else:
if not hasattr(self, 'y_pred_'):
raise RuntimeError('Please predict() epochs first or pass '
'epochs to score()')
self._prep_times()
super(TimeDecoding, self).score(epochs=None, y=y)
self._clean_times()
return self.scores_
def plot(self, title=None, xmin=None, xmax=None, ymin=None, ymax=None,
ax=None, show=True, color=None, xlabel=True, ylabel=True,
legend=True, chance=True, label='Classif. score'):
"""Plotting function
        Plot the decoding score of each classifier at the time sample on
        which it was trained and tested.
Parameters
----------
title : str | None
Figure title.
xmin : float | None, optional,
Min time value.
xmax : float | None, optional,
Max time value.
ymin : float
Min score value. Defaults to 0.
ymax : float
Max score value. Defaults to 1.
ax : object | None
            Instance of matplotlib.axes.Axes. If None, generate new figure.
show : bool
If True, the figure will be shown. Defaults to True.
color : str
Score line color. Defaults to 'steelblue'.
xlabel : bool
If True, the xlabel is displayed. Defaults to True.
ylabel : bool
If True, the ylabel is displayed. Defaults to True.
legend : bool
If True, a legend is displayed. Defaults to True.
        chance : bool | float. Defaults to True
Plot chance level. If True, chance level is estimated from the type
of scorer.
label : str
Score label used in the legend. Defaults to 'Classif. score'.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
# XXX JRK: need cleanup in viz
self._prep_times()
fig = plot_gat_times(self, train_time='diagonal', title=title,
xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, ax=ax,
show=show, color=color, xlabel=xlabel,
ylabel=ylabel, legend=legend, chance=chance,
label=label)
self._clean_times()
return fig
def _prep_times(self):
"""Auxiliary function to allow compability with GAT"""
self.test_times = 'diagonal'
if hasattr(self, 'times'):
self.train_times = self.times
if hasattr(self, 'times_'):
self.train_times_ = self.times_
self.test_times_ = _DecodingTime()
self.test_times_['slices'] = [[slic] for slic in
self.train_times_['slices']]
self.test_times_['times'] = [[tim] for tim in
self.train_times_['times']]
if hasattr(self, 'scores_'):
self.scores_ = [[score] for score in self.scores_]
if hasattr(self, 'y_pred_'):
self.y_pred_ = [[y_pred] for y_pred in self.y_pred_]
def _clean_times(self):
"""Auxiliary function to allow compability with GAT"""
if hasattr(self, 'train_times'):
self.times = self.train_times
if hasattr(self, 'train_times_'):
self.times_ = self.train_times_
for attr in ['test_times', 'train_times',
'test_times_', 'train_times_']:
if hasattr(self, attr):
delattr(self, attr)
if hasattr(self, 'y_pred_'):
self.y_pred_ = [y_pred[0] for y_pred in self.y_pred_]
if hasattr(self, 'scores_'):
self.scores_ = [score[0] for score in self.scores_]
| bsd-3-clause |
lamby/pkg-rst2pdf | rst2pdf/math_flowable.py | 8 | 6239 | # -*- coding: utf-8 -*-
# See LICENSE.txt for licensing terms
import tempfile
import os
import re
from reportlab.platypus import *
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase import pdfmetrics
from opt_imports import mathtext
from log import log
HAS_MATPLOTLIB = mathtext is not None
if HAS_MATPLOTLIB:
from matplotlib.font_manager import FontProperties
from matplotlib.colors import ColorConverter
fonts = {}
def enclose(s):
"""Enclose the string in $...$ if needed"""
if not re.match(r'.*\$.+\$.*', s, re.MULTILINE | re.DOTALL):
s = u"$%s$" % s
return s
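# Illustrative sketch: enclose(u'x^2') returns u'$x^2$', while a string that
# already contains a '$...$' span (e.g. u'a $x^2$ b') is returned unchanged.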
class Math(Flowable):
def __init__(self, s, label=None, fontsize=12,color='black'):
self.s = s
self.label = label
self.fontsize = fontsize
self.color = color
if HAS_MATPLOTLIB:
self.parser = mathtext.MathTextParser("Pdf")
else:
log.error("Math support not available,"
" some parts of this document will be rendered incorrectly."
" Install matplotlib.")
Flowable.__init__(self)
self.hAlign='CENTER'
def wrap(self, aW, aH):
if HAS_MATPLOTLIB:
try:
width, height, descent, glyphs, \
rects, used_characters = self.parser.parse(
enclose(self.s), 72, prop=FontProperties(size=self.fontsize))
return width, height
except:
pass
# FIXME: report error
return 10, 10
def drawOn(self, canv, x, y, _sW=0):
if _sW and hasattr(self,'hAlign'):
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
a = self.hAlign
if a in ('CENTER','CENTRE', TA_CENTER):
x = x + 0.5*_sW
elif a in ('RIGHT',TA_RIGHT):
x = x + _sW
elif a not in ('LEFT',TA_LEFT):
raise ValueError, "Bad hAlign value "+str(a)
height = 0
if HAS_MATPLOTLIB:
global fonts
canv.saveState()
canv.translate(x, y)
try:
width, height, descent, glyphs, \
rects, used_characters = self.parser.parse(
enclose(self.s), 72, prop=FontProperties(size=self.fontsize))
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
if not fontname in fonts:
fonts[fontname] = fontname
pdfmetrics.registerFont(TTFont(fontname, fontname))
canv.setFont(fontname, fontsize)
col_conv=ColorConverter()
rgb_color=col_conv.to_rgb(self.color)
canv.setFillColorRGB(rgb_color[0],rgb_color[1],rgb_color[2])
canv.drawString(ox, oy, unichr(num))
canv.setLineWidth(0)
canv.setDash([])
for ox, oy, width, height in rects:
canv.rect(ox, oy+2*height, width, height, fill=1)
except:
# FIXME: report error
col_conv=ColorConverter()
rgb_color=col_conv.to_rgb(self.color)
canv.setFillColorRGB(rgb_color[0],rgb_color[1],rgb_color[2])
canv.drawString(0,0,self.s)
canv.restoreState()
else:
canv.saveState()
canv.drawString(x, y, self.s)
canv.restoreState()
if self.label:
log.info('Drawing equation-%s'%self.label)
canv.bookmarkHorizontal('equation-%s'%self.label,0,height)
def descent(self):
"""Return the descent of this flowable,
useful to align it when used inline."""
if HAS_MATPLOTLIB:
width, height, descent, glyphs, rects, used_characters = \
self.parser.parse(enclose(self.s), 72, prop=FontProperties(size=self.fontsize))
return descent
return 0
def genImage(self):
"""Create a PNG from the contents of this flowable.
Required so we can put inline math in paragraphs.
Returns the file name.
        The file is the caller's responsibility.
"""
dpi = 72
scale = 10
try:
import Image
import ImageFont
import ImageDraw
import ImageColor
except ImportError:
from PIL import (
Image,
ImageFont,
ImageDraw,
ImageColor,
)
if not HAS_MATPLOTLIB:
img = Image.new('RGBA', (120, 120), (255,255,255,0))
else:
width, height, descent, glyphs,\
rects, used_characters = self.parser.parse(
enclose(self.s), dpi, prop=FontProperties(size=self.fontsize))
img = Image.new('RGBA', (int(width*scale), int(height*scale)),(255,255,255,0))
draw = ImageDraw.Draw(img)
for ox, oy, fontname, fontsize, num, symbol_name in glyphs:
font = ImageFont.truetype(fontname, int(fontsize*scale))
tw, th = draw.textsize(unichr(num), font=font)
# No, I don't understand why that 4 is there.
# As we used to say in the pure math
# department, that was a numerical solution.
col_conv=ColorConverter()
fc=col_conv.to_rgb(self.color)
rgb_color=(int(fc[0]*255),int(fc[1]*255),int(fc[2]*255))
draw.text((ox*scale, (height - oy - fontsize + 4)*scale),
unichr(num), font=font,fill=rgb_color)
for ox, oy, w, h in rects:
x1 = ox*scale
x2 = x1 + w*scale
y1 = (height - oy)*scale
y2 = y1 + h*scale
draw.rectangle([x1, y1, x2, y2],(0,0,0))
fh, fn = tempfile.mkstemp(suffix=".png")
os.close(fh)
img.save(fn)
return fn
if __name__ == "__main__":
doc = SimpleDocTemplate("mathtest.pdf")
Story = [Math(r'\mathcal{R}\prod_{i=\alpha\mathcal{B}}'\
r'^\infty a_i\sin(2 \pi f x_i)')]
doc.build(Story)
| mit |
jbogaardt/chainladder-python | examples/plot_munich_resid.py | 1 | 1537 | """
====================================
Munich Chainladder Correlation Plots
====================================
This example demonstrates how to recreate the residual correlation plots
of the Munich Chainladder paper.
"""
import chainladder as cl
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Fit Munich Model
mcl = cl.load_sample('mcl')
model = cl.MunichAdjustment([('paid', 'incurred')]).fit(mcl)
# Plot Data
fig, ((ax0, ax1)) = plt.subplots(ncols=2, figsize=(15,5))
# Paid lambda line
pd.DataFrame(
{'(P/I)': np.linspace(-2,2,2),
'P': np.linspace(-2,2,2)*model.lambda_.loc['paid']}).plot(
x='(P/I)', y='P', legend=False, ax=ax0)
# Paid scatter
paid_plot = pd.concat(
(model.resids_['paid'].melt(value_name='P')['P'],
model.q_resids_['paid'].melt(value_name='(P/I)')['(P/I)']),
axis=1).plot(
kind='scatter', y='P', x='(P/I)', ax=ax0,
xlim=(-2,2), ylim=(-2,2), grid=True, title='Paid')
# Incurred lambda line
inc_lambda = pd.DataFrame(
{'(I/P)': np.linspace(-2,2,2),
'I': np.linspace(-2,2,2)*model.lambda_.loc['incurred']})
inc_lambda.plot(x='(I/P)', y='I', ax=ax1, legend=False);
# Incurred scatter
incurred_plot = pd.concat(
(model.resids_['incurred'].melt(value_name='I')['I'],
model.q_resids_['incurred'].melt(value_name='(I/P)')['(I/P)']),
axis=1).plot(
kind='scatter', y='I', x='(I/P)', ax=ax1,
xlim=(-2,2), ylim=(-2,2), grid=True, title='Incurred');
fig.suptitle("Munich Chainladder Residual Correlations");
| mit |
zooniverse/aggregation | experimental/condor/site.py | 2 | 3590 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import urllib
import matplotlib.cbook as cbook
from PIL import Image
import matplotlib.pyplot as plt
import warnings
import random
import math
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
from divisiveKmeans import DivisiveKmeans
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['condor_2014-11-11']
classification_collection = db["condor_classifications"]
subject_collection = db["condor_subjects"]
relations = []
one = []
#print subject_collection.count({"classification_count":{"$gt":1}})
for subject in subject_collection.find({"classification_count":{"$gt":1}}):
#if not("USFWS photos/Remote Feeding Site Photos/Remote Feeding Photos_2008/Bitter Creek/NRFS/NRFS 4.16-4.17.2008=CORA, 17CACO/" in subject["metadata"]["file"]):
if not("USFWS photos/Remote Feeding Site Photos/Remote Feeding Photos_2011/Bitter Creek/BC 34.929570, -119.363840 Dec 17-Jan 8, 2011-12" in subject["metadata"]["file"]):
continue
zooniverse_id = subject["zooniverse_id"]
#print zooniverse_id
# print subject["metadata"]["file"]
# print subject["location"]["standard"]
annotation_list = []
user_list = []
for user_index,classification in enumerate(classification_collection.find({"subjects.zooniverse_id":zooniverse_id})):
try:
mark_index = [ann.keys() for ann in classification["annotations"]].index(["marks",])
markings = classification["annotations"][mark_index].values()[0]
for animal in markings.values():
scale = 1.875
x = scale*float(animal["x"])
y = scale*float(animal["y"])
try:
animal_type = animal["animal"]
#if not(animal_type in ["carcassOrScale","carcass"]):
if animal_type == "condor":
annotation_list.append((x,y))
user_list.append(user_index)
except KeyError:
annotation_list.append((x,y))
user_list.append(user_index)
except ValueError:
pass
user_identified_condors,clusters = DivisiveKmeans(3).fit2(annotation_list,user_list,debug=True)
#print len(user_identified_condors)
tt = 0
if len(user_identified_condors) > 1:
for c1_index in range(len(clusters)):
for c2_index in range(c1_index+1,len(clusters)):
condor1 = user_identified_condors[c1_index]
condor2 = user_identified_condors[c2_index]
dist = math.sqrt((condor1[0]-condor2[0])**2+(condor1[1]-condor2[1])**2)
users_1 = [user_list[annotation_list.index(pt)] for pt in clusters[c1_index]]
users_2 = [user_list[annotation_list.index(pt)] for pt in clusters[c2_index]]
overlap = [u for u in users_1 if u in users_2]
if len(overlap) <= 1:
relations.append((dist,len(overlap),c1_index,c2_index))
tt += 1
#relations.sort(key= lambda x:x[0])
if tt > 0:
one.append(zooniverse_id)
print tt
print len(relations)
x = zip(*relations)[0]
n, bins, patches = plt.hist(x, 20)
print bins
print one
plt.show()
| apache-2.0 |
santis19/tesina-fisica | Flexion/modelo-sintetico/6-deflexion-flexural/garcia.py | 1 | 1205 | import numpy as np
from scipy import fftpack
import matplotlib.pyplot as plt
from garcia_functions import *
"""
From a topography and a rigidity grid D we obtain the deflection generated
by that topographic load using the method of Garcia (2014).
"""
def ploteo(x, y, z, contours=150, title=""):
plt.axes(aspect='equal')
plt.contourf(x, y, z, contours)
plt.colorbar()
plt.title(title)
plt.show()
## READ DATA FILES
datos = np.loadtxt("../grillas/datos.txt")
topo = np.loadtxt("../grillas/topografia.npy")
Te = np.loadtxt("../grillas/espesor-elastico-grillado.npy")
x1, x2, y1, y2, nx, ny, rho_t, rho_c, rho_m, t = datos[:]
E = 1e11
nu = 0.25
## WORKING GRID
x = np.linspace(x1, x2, nx)
y = np.linspace(y1, y2, ny)
x, y = np.meshgrid(x, y)
Te[np.isnan(Te)] = 0
print Te
Te_0 = 12000
w0 = deflection_calculation(x, y, topo, rho_t, rho_c, rho_m, Te_0, E, nu)
w = garcia_iteration(x, y, w0, Te, Te_0, rho_t, rho_c, rho_m, E, nu, rms_dif_tolerance=1e-13)
#~ np.savetxt("../grillas/topografia.npy", topo)
np.savetxt("../grillas/deflexion-flexural.npy", w)
ploteo(x, y, Te, title="Te")
ploteo(x, y, w0, title="w0")
ploteo(x, y, w, title="w")
| gpl-2.0 |
zehpunktbarron/iOSMAnalyzer | scripts/c2_actuality_point.py | 1 | 5651 | # -*- coding: utf-8 -*-
#!/usr/bin/python2.7
#description :This file creates a plot: Calculate the actuality of all points
#author :Christopher Barron @ http://giscience.uni-hd.de/
#date :19.01.2013
#version :0.1
#usage :python pyscript.py
#==============================================================================
import psycopg2
from pylab import *
# import db connection parameters
import db_conn_para as db
###
### Connect to database with psycopg2. Add arguments from parser to the connection-string
###
try:
conn_string="dbname= %s user= %s host= %s password= %s" %(db.g_my_dbname, db.g_my_username, db.g_my_hostname, db.g_my_dbpassword)
print "Connecting to database\n->%s" % (conn_string)
conn = psycopg2.connect(conn_string)
print "Connection to database was established succesfully"
except:
print "Connection to database failed"
###
### Execute SQL query
###
# New cursor method for sql
cur = conn.cursor()
# Execute SQL query. For more than one row use three '"'
try:
cur.execute("""
SELECT
(SELECT
count(id)
FROM
(SELECT
id,
-- select latest edit in the whole database as timestamp of the dataset
extract(days FROM(SELECT max(valid_from) FROM hist_plp) - valid_from) AS age
FROM
hist_point
WHERE
visible = 'true' AND
(version = (SELECT max(version) FROM hist_point AS h WHERE h.id = hist_point.id AND
(valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null)))
AND minor = (SELECT max(minor) FROM hist_point AS h WHERE h.id = hist_point.id AND h.version = hist_point.version AND
(valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null))))
) AS foo
WHERE
age <= 183 -- less than 6 months
),
(SELECT
count(id)
FROM
(SELECT
id,
-- select latest edit in the whole database as timestamp of the dataset
extract(days FROM(SELECT max(valid_from) FROM hist_plp) - valid_from) AS age
FROM
hist_point
WHERE
visible = 'true' AND
(version = (SELECT max(version) FROM hist_point AS h WHERE h.id = hist_point.id AND
(valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null)))
AND minor = (SELECT max(minor) FROM hist_point AS h WHERE h.id = hist_point.id AND h.version = hist_point.version AND
(valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null))))
) AS foo
WHERE
age > 183 AND age <= 365 -- older than 6 months and less than 1 year
),
(SELECT
count(id)
FROM
(SELECT
id,
-- select latest edit in the whole database as timestamp of the dataset
extract(days FROM(SELECT max(valid_from) FROM hist_plp) - valid_from) AS age
FROM
hist_point
WHERE
visible = 'true' AND
(version = (SELECT max(version) FROM hist_point AS h WHERE h.id = hist_point.id AND
(valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null)))
AND minor = (SELECT max(minor) FROM hist_point AS h WHERE h.id = hist_point.id AND h.version = hist_point.version AND
(valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null))))
) AS foo
WHERE
age > 365 AND age <= 730 -- older than 1 year and less than 2 years
),
(SELECT
count(id)
FROM
(SELECT
id,
-- select latest edit in the whole database as timestamp of the dataset
extract(days FROM(SELECT max(valid_from) FROM hist_plp) - valid_from) AS age
FROM
hist_point
WHERE
visible = 'true' AND
(version = (SELECT max(version) FROM hist_point AS h WHERE h.id = hist_point.id AND
(valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null)))
AND minor = (SELECT max(minor) FROM hist_point AS h WHERE h.id = hist_point.id AND h.version = hist_point.version AND
(valid_from <= CURRENT_TIMESTAMP AND (valid_to >= CURRENT_TIMESTAMP OR valid_to is null))))
) AS foo
WHERE
age > 730 -- older than 2 years
)
""")
# Return the results of the query. Fetchall() = all rows, fetchone() = first row
records = cur.fetchone()
cur.close()
except:
print "Query could not be executed"
# Get data from query
one = records[0]
two = records[1]
three = records[2]
four = records[3]
# make a square figure and axes
figure(1, figsize=(9,9))
ax = axes([0.2, 0.2, 0.6, 0.6])
# pie-labelling
labels = '< 6 months', '> 6 and <= 12 months', '> 1 year and <= 2 years', '> 2 years'
# get db-values as fracs
fracs = [one, two, three, four]
# explode values
explode=(0.05, 0.05, 0.05, 0.05)
# Colors in RGB. Not sure about the values (counts). Source: http://stackoverflow.com/questions/5133871/how-to-plot-a-pie-of-color-list
# Matplotlib for some reason changes the colors, therefore colors 2 and 4 have to be swapped...
data = {(0, 210, 0): 11, (236, 0, 0): 11, (255, 127, 36): 11, (234, 234, 0): 11, } # values in hexa: #2DD700 ,#00A287, #FF6700
colors = []
counts = []
for color, count in data.items():
# matplotlib wants colors as 0.0-1.0 floats, not 0-255 ints
colors.append([float(x)/255 for x in color])
counts.append(count)
# Percentage (and total values)
def my_autopct(pct):
total=sum(fracs)
val=int(pct*total/100.0)
return '{p:.1f}% ({v:d})'.format(p=pct,v=val)
# The pie chart (DB values, exploded pies, labels, decimal places, shadows on the pies)
pie(fracs, explode=explode, colors=colors, autopct=my_autopct, labels=labels, shadow=True)
# Title of the pie chart
title('Actuality of all OSM-Point-Features')
# Save plot to *.png-file
plt.savefig('pics/c2_actuality_point.jpeg')
plt.clf()
| gpl-3.0 |
hvanwyk/quadmesh | debug/debug_hanging_nodes.py | 1 | 3525 | from mesh import Mesh
from finite_element import QuadFE, DofHandler
import matplotlib.pyplot as plt
import numpy as np
from plot import Plot
# TODO: Add this all to the test file.
mesh = Mesh.newmesh()
mesh.refine()
mesh.root_node().children['SE'].mark('l')
mesh.refine('l')
mesh.root_node().children['SE'].children['SW'] = None
_,ax = plt.subplots()
element_type = 'Q3'
V = QuadFE(2,element_type)
d = DofHandler(mesh,V)
d.distribute_dofs()
order = int(list(element_type)[1])
x = [i*1.0/order for i in range(order+1)]
xy = []
for xi in x:
for yi in x:
xy.append((xi,yi))
zlist = []
for n in range(order+1):
print('\n\n')
for yi in x:
zline = []
for xi in x:
zline.append(V.phi(n,np.array([(xi,yi)])))
print([v for v in zline])
"""
print('Evaluating shape functions at hanging nodes.')
x = [i*0.5/order for i in range(2*order+1)]
xy = [(xi,1.0) for xi in x]
for xx in xy:
print('Point {0}'.format(xx))
zline = []
for n in range(12):
zline.append(V.phi(n,xx))
print([v for v in zline])
print('\n')
"""
C = d.make_hanging_node_constraints()
print(C)
c = V.constraint_coefficients()
plot = Plot()
plot.mesh(ax, mesh, element=V, cell_numbers=True, vertex_numbers=False, dofs=True )
plt.show()
'''
# Number nodes Q1
count = 0
for node in mesh.root_node().find_leaves():
node.dofs = dict.fromkeys(['SW','SE','NW','NE'])
for key in ['SW','SE','NW','NE']:
if node.dofs[key] == None:
node.dofs[key] = count
#
# Shared cells
#
for direction in list(key).append(key):
nb = node.find_neighbor(direction)
if nb != None
if nb.has_children():
nb = nb.children[opposite[direction]]
no_neighbor = dict.fromkeys(list(key), False)
print(no_neighbor)
for direction in list(key):
nb = node.find_neighbor(direction)
if nb != None:
pos = key.replace(direction,opposite[direction])
if nb.has_children():
child = nb.children[pos]
shared_cells[direction] = {pos: child}
else:
shared_cells[direction] = {pos: nb}
else:
no_neighbor[direction] = True
# Diagonal neighbor
if all(no_neighbor.values()):
# No neighbors in either direction
"""
if nb.has_children():
new_key =
nb = nb.children[new_key]
if nb != None:
if not hasattr(nb,'dofs'):
nb.dofs = dict.fromkeys(['SW','SE','NW','NE'])
if nb.dofs[key] != None:
nb.dofs().vertices[new_key] = count
else:
if nb.dofs[key] != None:
nb.dofs[key.replace(direction,opposite[direction])] = count
if not hasattr(nb,'dofs'):
nb.dofs = dict.fromkeys(['SW','SE','NW','NE'])
"""
print(no_neighbor)
count += 1
''' | mit |
krez13/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
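if __name__ == '__main__':
    # --- Illustrative direct run (editor addition, not part of the upstream test
    # suite). It mirrors test_partial_dependence_classifier on the toy sample
    # above: with grid_resolution=5 only 4 grid points come back because
    # X[:, 0] has only 4 unique values.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)
    pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
    print(pdp.shape)
    print(axes[0].shape)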
| bsd-3-clause |
russel1237/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 329 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create 40 separable points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
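# --- Illustrative variant (editor addition, not part of the original example) ---
# The docstring note above suggests swapping the SVC for an SGDClassifier with a
# hinge loss. A hedged sketch of that variant is kept as comments below;
# ``n_iter`` is the parameter name used by the scikit-learn release this example
# targets (newer releases call it ``max_iter``).
#
#     from sklearn.linear_model import SGDClassifier
#     sgd = SGDClassifier(loss="hinge", n_iter=100, alpha=0.01,
#                         class_weight={1: 10})
#     sgd.fit(X, y)
#     wsgd = sgd.coef_[0]
#     yy_sgd = -wsgd[0] / wsgd[1] * xx - sgd.intercept_[0] / wsgd[1]
#     plt.plot(xx, yy_sgd, 'k:', label='SGD with weights')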
| bsd-3-clause |
elkingtonmcb/scikit-learn | sklearn/neighbors/classification.py | 132 | 14388 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
        If set to None, a ValueError is raised when an outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights)],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
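if __name__ == "__main__":
    # --- Illustrative usage sketch (editor addition, not part of scikit-learn). ---
    # It exercises two things described in the docstrings above: a user-defined
    # ``weights`` callable for KNeighborsClassifier and the ``outlier_label``
    # option of RadiusNeighborsClassifier, on a toy data set.
    def inverse_square(distances):
        # a weights callable must return an array of the same shape it receives
        return 1.0 / (distances ** 2 + 1e-6)
    X_demo = [[0], [1], [2], [3]]
    y_demo = [0, 0, 1, 1]
    knn = KNeighborsClassifier(n_neighbors=3, weights=inverse_square)
    knn.fit(X_demo, y_demo)
    print(knn.predict([[1.1]]))
    rnc = RadiusNeighborsClassifier(radius=1.0, outlier_label=-1)
    rnc.fit(X_demo, y_demo)
    print(rnc.predict([[1.5], [10.0]]))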
| bsd-3-clause |
sunny94/temp | sympy/external/importtools.py | 85 | 7294 | """Tools to assist importing optional external modules."""
from __future__ import print_function, division
import sys
# Override these in the module to change the default warning behavior.
# For example, you might set both to False before running the tests so that
# warnings are not printed to the console, or set both to True for debugging.
WARN_NOT_INSTALLED = None # Default is False
WARN_OLD_VERSION = None # Default is True
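# For example (illustrative sketch, an editor addition -- kept as comments so it
# is not executed here):
#     from sympy.external import importtools
#     importtools.WARN_NOT_INSTALLED = False   # silence "not installed" warnings
#     importtools.WARN_OLD_VERSION = False     # silence "too old" warnings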
def __sympy_debug():
# helper function from sympy/__init__.py
# We don't just import SYMPY_DEBUG from that file because we don't want to
# import all of sympy just to use this module.
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
if __sympy_debug():
WARN_OLD_VERSION = True
WARN_NOT_INSTALLED = True
def import_module(module, min_module_version=None, min_python_version=None,
warn_not_installed=None, warn_old_version=None,
module_version_attr='__version__', module_version_attr_call_args=None,
__import__kwargs={}, catch=()):
"""
Import and return a module if it is installed.
If the module is not installed, it returns None.
A minimum version for the module can be given as the keyword argument
min_module_version. This should be comparable against the module version.
By default, module.__version__ is used to get the module version. To
override this, set the module_version_attr keyword argument. If the
attribute of the module to get the version should be called (e.g.,
module.version()), then set module_version_attr_call_args to the args such
that module.module_version_attr(*module_version_attr_call_args) returns the
module's version.
If the module version is less than min_module_version using the Python <
comparison, None will be returned, even if the module is installed. You can
use this to keep from importing an incompatible older version of a module.
You can also specify a minimum Python version by using the
min_python_version keyword argument. This should be comparable against
sys.version_info.
If the keyword argument warn_not_installed is set to True, the function will
emit a UserWarning when the module is not installed.
If the keyword argument warn_old_version is set to True, the function will
emit a UserWarning when the library is installed, but cannot be imported
because of the min_module_version or min_python_version options.
Note that because of the way warnings are handled, a warning will be
emitted for each module only once. You can change the default warning
behavior by overriding the values of WARN_NOT_INSTALLED and WARN_OLD_VERSION
in sympy.external.importtools. By default, WARN_NOT_INSTALLED is False and
WARN_OLD_VERSION is True.
This function uses __import__() to import the module. To pass additional
options to __import__(), use the __import__kwargs keyword argument. For
example, to import a submodule A.B, you must pass a nonempty fromlist option
to __import__. See the docstring of __import__().
This catches ImportError to determine if the module is not installed. To
catch additional errors, pass them as a tuple to the catch keyword
argument.
Examples
========
>>> from sympy.external import import_module
>>> numpy = import_module('numpy')
>>> numpy = import_module('numpy', min_python_version=(2, 7),
... warn_old_version=False)
>>> numpy = import_module('numpy', min_module_version='1.5',
... warn_old_version=False) # numpy.__version__ is a string
>>> # gmpy does not have __version__, but it does have gmpy.version()
>>> gmpy = import_module('gmpy', min_module_version='1.14',
... module_version_attr='version', module_version_attr_call_args=(),
... warn_old_version=False)
>>> # To import a submodule, you must pass a nonempty fromlist to
>>> # __import__(). The values do not matter.
>>> p3 = import_module('mpl_toolkits.mplot3d',
... __import__kwargs={'fromlist':['something']})
>>> # matplotlib.pyplot can raise RuntimeError when the display cannot be opened
>>> matplotlib = import_module('matplotlib',
... __import__kwargs={'fromlist':['pyplot']}, catch=(RuntimeError,))
"""
# keyword argument overrides default, and global variable overrides
# keyword argument.
warn_old_version = (WARN_OLD_VERSION if WARN_OLD_VERSION is not None
else warn_old_version or True)
warn_not_installed = (WARN_NOT_INSTALLED if WARN_NOT_INSTALLED is not None
else warn_not_installed or False)
import warnings
# Check Python first so we don't waste time importing a module we can't use
if min_python_version:
if sys.version_info < min_python_version:
if warn_old_version:
warnings.warn("Python version is too old to use %s "
"(%s or newer required)" % (
module, '.'.join(map(str, min_python_version))),
UserWarning)
return
# PyPy 1.6 has rudimentary NumPy support and importing it produces errors, so skip it
if module == 'numpy' and '__pypy__' in sys.builtin_module_names:
return
try:
mod = __import__(module, **__import__kwargs)
## there's something funny about imports with matplotlib and py3k. doing
## from matplotlib import collections
## gives python's stdlib collections module. explicitly re-importing
## the module fixes this.
from_list = __import__kwargs.get('fromlist', tuple())
for submod in from_list:
if submod == 'collections' and mod.__name__ == 'matplotlib':
__import__(module + '.' + submod)
except ImportError:
if warn_not_installed:
warnings.warn("%s module is not installed" % module, UserWarning)
return
except catch as e:
if warn_not_installed:
warnings.warn(
"%s module could not be used (%s)" % (module, repr(e)))
return
if min_module_version:
modversion = getattr(mod, module_version_attr)
if module_version_attr_call_args is not None:
modversion = modversion(*module_version_attr_call_args)
if modversion < min_module_version:
if warn_old_version:
# Attempt to create a pretty string version of the version
if isinstance(min_module_version, basestring):
verstr = min_module_version
elif isinstance(min_module_version, (tuple, list)):
verstr = '.'.join(map(str, min_module_version))
else:
# Either don't know what this is. Hopefully
# it's something that has a nice str version, like an int.
verstr = str(min_module_version)
warnings.warn("%s version is too old to use "
"(%s or newer required)" % (module, verstr),
UserWarning)
return
return mod
| bsd-3-clause |
avmarchenko/exatomic | exatomic/interfaces/xyz.py | 2 | 5330 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
XYZ File Editor
##################
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import six
import csv
import numpy as np
import pandas as pd
from exa import TypedMeta
from exa.util.units import Length
from exa.util.utility import mkp
from exatomic.core.editor import Editor
from exatomic.core.frame import compute_frame_from_atom, Frame
from exatomic.core.atom import Atom
from exatomic.algorithms.indexing import starts_counts
class Meta(TypedMeta):
atom = Atom
frame = Frame
class XYZ(six.with_metaclass(Meta, Editor)):
"""
An editor for programmatically editing `xyz`_ files.
.. _xyz: https://en.wikipedia.org/wiki/XYZ_file_format
"""
_header = '{nat}\n{comment}\n'
_cols = ['symbol', 'x', 'y', 'z']
def parse_frame(self):
self.frame = compute_frame_from_atom(self.atom)
def parse_atom(self, unit='Angstrom', names=('symbol', 'x', 'y', 'z')):
"""
Parse the atom table from the current xyz file.
Args:
unit (str): Default xyz unit of length is the Angstrom
"""
df = pd.read_csv(six.StringIO(six.u(str(self))), delim_whitespace=True,
names=names, header=None,
skip_blank_lines=False)
# The following algorithm works for both trajectory files and single xyz files
nats = pd.Series(df[df[['y', 'z']].isnull().all(axis=1)].index)
nats = nats[nats.diff() != 1].values
comments = nats + 1
nats = df.loc[nats, 'symbol']
comments = df.loc[comments, :].dropna(how='all').index
initials = nats.index.values.astype(np.int64) + 2
counts = nats.values.astype(np.int64)
frame, _, indices = starts_counts(initials, counts)
df = df[df.index.isin(indices)]
df[['x', 'y', 'z']] = df[['x', 'y', 'z']].astype(np.float64)
df['symbol'] = df['symbol'].astype('category')
df['frame'] = frame
df['frame'] = df['frame'].astype('category')
df.reset_index(drop=True, inplace=True)
df.index.names = ['atom']
df['x'] *= Length[unit, 'au']
df['y'] *= Length[unit, 'au']
df['z'] *= Length[unit, 'au']
if self.meta is not None:
self.meta['comments'] = {line: self._lines[line] for line in comments}
else:
self.meta = {'comments': {line: self._lines[line] for line in comments}}
self.atom = df
def write(self, path, trajectory=True, float_format='% .8f'):
"""
Write an xyz file (or files) to disk.
Args:
path (str): Directory or file path
            trajectory (bool): Write a single xyz trajectory file (default) or individual xyz files, one per frame
Returns:
path (str): On success, return the directory or file path written
"""
if trajectory:
with open(path, 'w') as f:
f.write(str(self))
else:
grps = self.atom.cardinal_groupby()
n = len(str(self.frame.index.max()))
for frame, atom in grps:
filename = str(frame).zfill(n) + '.xyz'
with open(mkp(path, filename), 'w') as f:
f.write(self._header.format(nat=str(len(atom)),
comment='frame: ' + str(frame)))
a = atom[self._cols].copy()
a['x'] *= Length['au', 'Angstrom']
a['y'] *= Length['au', 'Angstrom']
a['z'] *= Length['au', 'Angstrom']
a.to_csv(f, header=False, index=False, sep=' ', float_format=float_format,
quoting=csv.QUOTE_NONE, escapechar=' ')
@classmethod
def from_universe(cls, universe, atom_table='atom', float_format='% .8f'):
"""
Create an xyz file editor from a given universe. If the universe has
more than one frame, creates an xyz trajectory format editor.
Args:
universe: The universe
atom_table (str): One of 'atom', 'unit', or 'visual' corresponding to coordinates
float_format (str): Floating point format (for writing)
"""
string = ''
grps = universe.atom.cardinal_groupby()
for frame, atom in grps:
string += cls._header.format(nat=len(atom), comment='frame: ' + str(frame))
atom_copy = atom[cls._cols].copy()
if atom_table == 'unit':
atom_copy.update(universe.unit_atom)
elif atom_table == 'visual':
atom_copy.update(universe.visual_atom)
atom_copy['x'] *= Length['au', 'Angstrom']
atom_copy['y'] *= Length['au', 'Angstrom']
atom_copy['z'] *= Length['au', 'Angstrom']
string += atom_copy.to_csv(sep=' ', header=False, quoting=csv.QUOTE_NONE,
index=False, float_format=float_format,
escapechar=' ')
return cls(string, name=universe.name, description=universe.description,
meta=universe.meta)
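if __name__ == '__main__':
    # --- Illustrative sketch (editor addition, not part of exatomic). ---
    # Assumes the exa/exatomic stack is installed and that XYZ, like the base
    # Editor used in from_universe above, accepts a raw string. Parses a tiny
    # water molecule and prints the resulting atom table.
    water = ("3\n"
             "water molecule\n"
             "O  0.000  0.000  0.000\n"
             "H  0.757  0.586  0.000\n"
             "H -0.757  0.586  0.000\n")
    ed = XYZ(water)
    ed.parse_atom()
    print(ed.atom)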
| apache-2.0 |
SchwarzerWolf/KSFH | main.py | 1 | 3687 | #!/usr/bin/env python
""" SchwarzerWolf.cc
**************************
*date = '2017-12-17' *
*module_version = '0.4.4'*
**************************
***********************************************************************
*[KSFH - Kraftsport und Fitness Helfer] *
*Module -> '.main.py' *
* *
*Copyleft [2017] - [SchwarzerWolf.cc] *
*This program is free software: you can redistribute it and/or modify *
*it under the terms of the GNU General Public License as published by *
*the Free Software Foundation, either version 3 of the License, or *
*(at your option) any later version. *
* *
*This program is distributed in the hope that it will be useful, *
*but WITHOUT ANY WARRANTY; without even the implied warranty of *
*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
*GNU General Public License for more details. *
* *
*You should have received a copy of the GNU General Public License *
*along with this program. If not, see <http://www.gnu.org/licenses/>.*
***********************************************************************
"""
# Module imports
# **********************************************************
import tkinter as tk
from tkinter import ttk
from matplotlib import pyplot as plt
from modules.base import Base
from modules.calculate_reference import CalculationReference
from modules.license import ProgramLic
from modules.one_rep_max import OneRepMax
from modules.run import Run
from modules.strength_standards import StrengthStandards
from modules.version import program_title, program_version
# **********************************************************
# Constants
# *************************************************
plt.style.use('./templates/schwarzerwolf.mplstyle')
# *************************************************
class Program(tk.Tk):
""" Basis of the program. Used to configure the ttk.Notebook. """
def __init__(self):
tk.Tk.__init__(self)
# Title
self.title('{title} - {ver}'.format(title=program_title,
ver=program_version))
# Resizeable -> no
self.resizable(False, False)
# Notebook
self.nb = ttk.Notebook(master=self,
width=1100,
height=650)
# Methods -> load
self.nb_conf()
def nb_conf(self):
""" Here, large parts of the 'ttk.Notebook' are configured. """
# Classes
base = Base(self.nb)
one_rep_max = OneRepMax(self.nb)
strength_std = StrengthStandards(self.nb)
run = Run(self.nb)
calc_ref = CalculationReference(self.nb)
program_lic = ProgramLic(self.nb)
# Style
style = ttk.Style()
style.configure('TNotebook',
foreground='#ffffff',
background='#000000')
# Config
self.nb.add(base, text='Basis'),
self.nb.add(one_rep_max, text='1RM')
self.nb.add(strength_std, text='Kraftstandards')
self.nb.add(run, text='Laufsport')
self.nb.add(calc_ref, text='Berechnungsreferenz')
self.nb.add(program_lic, text='Programlizenz')
# Grid
self.nb.grid()
def main():
Program().mainloop()
if __name__ == '__main__':
main()
| gpl-3.0 |
davidgardenier/frbpoppy | tests/chime/obs_rep_frac.py | 1 | 1849 | """Calculate the actual CHIME repeater fraction."""
import pandas as pd
from frbcat import ChimeRepeaters
def calc_rep_frac():
df = ChimeRepeaters().df
df.timestamp = pd.to_datetime(df.timestamp)
df.sort_values('timestamp', inplace=True)
starting_time = df['timestamp'].iloc[0]
srcs_seen_once = []
srcs_seen_twice = []
time_rep_seen = []
for index, row in df.iterrows():
src = row['name']
dt = (row['timestamp'] - starting_time).total_seconds() / 86400
if src not in srcs_seen_once:
srcs_seen_once.append(src)
continue
elif src not in srcs_seen_twice:
# Timedelta at which repeater detected
time_rep_seen.append(dt)
srcs_seen_twice.append(src)
return time_rep_seen
if __name__ == '__main__':
import matplotlib.pyplot as plt
from tests.convenience import plot_aa_style, rel_path
import numpy as np
from tqdm import tqdm
# Set up plot style
plot_aa_style(cols=1)
f, ax1 = plt.subplots(1, 1)
# See how the real fraction changes over time
chime_fracs = []
dts = calc_rep_frac()
days = [d for d in range(301)]
for day in tqdm(days, desc='frbcat'):
n_rep = sum([dt <= day for dt in dts])
n_one_offs = 2*day
try:
frac = n_rep / (n_rep + n_one_offs)
except ZeroDivisionError:
frac = np.nan
chime_fracs.append(frac)
ax1.plot(days, chime_fracs, label='chime-frb')
# Further plot details
ax1.set_xlabel(r'Time (days)')
ax1.set_ylabel(r'$f_{\textrm{rep}}$')
# Equal to $N_{\textrm{repeaters}}/N_{\textrm{detections}}$
ax1.set_xlim(0, max(days))
ax1.set_yscale('log')
# Save figure
plt.tight_layout()
plt.savefig(rel_path('plots/obs_rep_frac_chime.pdf'))
plt.clf()
| mit |
CG-F16-24-Rutgers/steersuite-rutgers | steerstats/tools/deap/tools/support.py | 19 | 26469 | from __future__ import division
try:
import cPickle as pickle
except ImportError:
import pickle
from bisect import bisect_right
from collections import defaultdict
from copy import deepcopy
from functools import partial
from itertools import chain
from operator import eq
def identity(obj):
"""Returns directly the argument *obj*.
"""
return obj
class History(object):
"""The :class:`History` class helps to build a genealogy of all the
individuals produced in the evolution. It contains two attributes,
the :attr:`genealogy_tree` that is a dictionary of lists indexed by
    individual, where each list contains the indices of the parents. The second
attribute :attr:`genealogy_history` contains every individual indexed
by their individual number as in the genealogy tree.
The produced genealogy tree is compatible with `NetworkX
<http://networkx.lanl.gov/index.html>`_, here is how to plot the genealogy
tree ::
history = History()
# Decorate the variation operators
toolbox.decorate("mate", history.decorator)
toolbox.decorate("mutate", history.decorator)
# Create the population and populate the history
population = toolbox.population(n=POPSIZE)
history.update(population)
# Do the evolution, the decorators will take care of updating the
# history
# [...]
import matplotlib.pyplot as plt
import networkx
graph = networkx.DiGraph(history.genealogy_tree)
        graph = graph.reverse()     # Make the graph top-down
colors = [toolbox.evaluate(history.genealogy_history[i])[0] for i in graph]
networkx.draw(graph, node_color=colors)
plt.show()
Using NetworkX in combination with `pygraphviz
<http://networkx.lanl.gov/pygraphviz/>`_ (dot layout) this amazing
genealogy tree can be obtained from the OneMax example with a population
    size of 20 and 5 generations, where the color of the nodes indicates their
fitness, blue is low and red is high.
.. image:: /_images/genealogy.png
:width: 67%
.. note::
The genealogy tree might get very big if your population and/or the
        number of generations is large.
"""
def __init__(self):
self.genealogy_index = 0
self.genealogy_history = dict()
self.genealogy_tree = dict()
def update(self, individuals):
"""Update the history with the new *individuals*. The index present in
their :attr:`history_index` attribute will be used to locate their
parents, it is then modified to a unique one to keep track of those
new individuals. This method should be called on the individuals after
each variation.
:param individuals: The list of modified individuals that shall be
inserted in the history.
If the *individuals* do not have a :attr:`history_index` attribute,
the attribute is added and this individual is considered as having no
parent. This method should be called with the initial population to
initialize the history.
Modifying the internal :attr:`genealogy_index` of the history or the
:attr:`history_index` of an individual may lead to unpredictable
results and corruption of the history.
"""
try:
parent_indices = tuple(ind.history_index for ind in individuals)
except AttributeError:
parent_indices = tuple()
for ind in individuals:
self.genealogy_index += 1
ind.history_index = self.genealogy_index
self.genealogy_history[self.genealogy_index] = deepcopy(ind)
self.genealogy_tree[self.genealogy_index] = parent_indices
@property
def decorator(self):
"""Property that returns an appropriate decorator to enhance the
operators of the toolbox. The returned decorator assumes that the
individuals are returned by the operator. First the decorator calls
the underlying operation and then calls the :func:`update` function
with what has been returned by the operator. Finally, it returns the
individuals with their history parameters modified according to the
update function.
"""
def decFunc(func):
def wrapFunc(*args, **kargs):
individuals = func(*args, **kargs)
self.update(individuals)
return individuals
return wrapFunc
return decFunc
def getGenealogy(self, individual, max_depth=float("inf")):
"""Provide the genealogy tree of an *individual*. The individual must
have an attribute :attr:`history_index` as defined by
:func:`~deap.tools.History.update` in order to retrieve its associated
genealogy tree. The returned graph contains the parents up to
        *max_depth* variations before this individual. If not provided,
        the maximum depth is up to the beginning of the evolution.
:param individual: The individual at the root of the genealogy tree.
:param max_depth: The approximate maximum distance between the root
(individual) and the leaves (parents), optional.
:returns: A dictionary where each key is an individual index and the
values are a tuple corresponding to the index of the parents.
"""
gtree = {}
visited = set() # Adds memory to the breadth first search
def genealogy(index, depth):
if index not in self.genealogy_tree:
return
depth += 1
if depth > max_depth:
return
parent_indices = self.genealogy_tree[index]
gtree[index] = parent_indices
for ind in parent_indices:
if ind not in visited:
genealogy(ind, depth)
visited.add(ind)
genealogy(individual.history_index, 0)
return gtree
class Statistics(object):
"""Object that compiles statistics on a list of arbitrary objects.
When created the statistics object receives a *key* argument that
is used to get the values on which the function will be computed.
If not provided the *key* argument defaults to the identity function.
The value returned by the key may be a multi-dimensional object, i.e.:
a tuple or a list, as long as the statistical function registered
support it. So for example, statistics can be computed directly on
multi-objective fitnesses when using numpy statistical function.
:param key: A function to access the values on which to compute the
statistics, optional.
::
>>> s = Statistics()
>>> s.register("mean", numpy.mean)
>>> s.register("max", max)
>>> s.compile([1, 2, 3, 4])
{'max': 4, 'mean': 2.5}
>>> s.compile([5, 6, 7, 8])
{'max': 8, 'mean': 6.5}
"""
def __init__(self, key=identity):
self.key = key
self.functions = dict()
self.fields = []
def register(self, name, function, *args, **kargs):
"""Register a *function* that will be applied on the sequence each
time :meth:`record` is called.
:param name: The name of the statistics function as it would appear
                     in the dictionary of the statistics object.
:param function: A function that will compute the desired statistics
on the data as preprocessed by the key.
:param argument: One or more argument (and keyword argument) to pass
automatically to the registered function when called,
optional.
"""
self.functions[name] = partial(function, *args, **kargs)
self.fields.append(name)
def compile(self, data):
"""Apply to the input sequence *data* each registered function
        and return the results as a dictionary.
:param data: Sequence of objects on which the statistics are computed.
"""
values = tuple(self.key(elem) for elem in data)
entry = dict()
for key, func in self.functions.iteritems():
entry[key] = func(values)
return entry
class MultiStatistics(dict):
"""Dictionary of :class:`Statistics` object allowing to compute
statistics on multiple keys using a single call to :meth:`compile`. It
takes a set of key-value pairs associating a statistics object to a
unique name. This name can then be used to retrieve the statistics object.
The following code computes statistics simultaneously on the length and
the first value of the provided objects.
::
>>> len_stats = Statistics(key=len)
>>> itm0_stats = Statistics(key=itemgetter(0))
>>> mstats = MultiStatistics(length=len_stats, item=itm0_stats)
>>> mstats.register("mean", numpy.mean, axis=0)
>>> mstats.register("max", numpy.max, axis=0)
>>> mstats.compile([[0.0, 1.0, 1.0, 5.0], [2.0, 5.0]])
{'length': {'max': 4, 'mean': 3.0}, 'item': {'max': 2.0, 'mean': 1.0}}
"""
def compile(self, data):
"""Calls :meth:`Statistics.compile` with *data* of each
:class:`Statistics` object.
:param data: Sequence of objects on which the statistics are computed.
"""
record = {}
for name, stats in self.items():
record[name] = stats.compile(data)
return record
@property
def fields(self):
return sorted(self.keys())
def register(self, name, function, *args, **kargs):
"""Register a *function* in each :class:`Statistics` object.
:param name: The name of the statistics function as it would appear
                     in the dictionary of the statistics object.
:param function: A function that will compute the desired statistics
on the data as preprocessed by the key.
:param argument: One or more argument (and keyword argument) to pass
automatically to the registered function when called,
optional.
"""
for stats in self.values():
stats.register(name, function, *args, **kargs)
class Logbook(list):
"""Evolution records as a chronological list of dictionaries.
Data can be retrieved via the :meth:`select` method given the appropriate
names.
    The :class:`Logbook` class may also contain other logbooks referred to
as chapters. Chapters are used to store information associated to a
specific part of the evolution. For example when computing statistics
on different components of individuals (namely :class:`MultiStatistics`),
chapters can be used to distinguish the average fitness and the average
size.
"""
def __init__(self):
self.buffindex = 0
self.chapters = defaultdict(Logbook)
"""Dictionary containing the sub-sections of the logbook which are also
:class:`Logbook`. Chapters are automatically created when the right hand
        side of a keyword argument, provided to the *record* function, is a
        dictionary. The keyword determines the chapter's name. For example, the
following line adds a new chapter "size" that will contain the fields
"max" and "mean". ::
logbook.record(gen=0, size={'max' : 10.0, 'mean' : 7.5})
To access a specific chapter, use the name of the chapter as a
dictionnary key. For example, to access the size chapter and select
the mean use ::
logbook.chapters["size"].select("mean")
Compiling a :class:`MultiStatistics` object returns a dictionary
containing dictionnaries, therefore when recording such an object in a
logbook using the keyword argument unpacking operator (**), chapters
will be automatically added to the logbook.
::
>>> fit_stats = Statistics(key=attrgetter("fitness.values"))
>>> size_stats = Statistics(key=len)
>>> mstats = MultiStatistics(fitness=fit_stats, size=size_stats)
>>> # [...]
>>> record = mstats.compile(population)
>>> logbook.record(**record)
>>> print logbook
fitness length
------------ ------------
max mean max mean
2 1 4 3
"""
self.columns_len = None
self.header = None
"""Order of the columns to print when using the :data:`stream` and
:meth:`__str__` methods. The syntax is a single iterable containing
string elements. For example, with the previously
defined statistics class, one can print the generation and the
fitness average, and maximum with
::
logbook.header = ("gen", "mean", "max")
        If not set, the header is built with all fields, in arbitrary order
on insertion of the first data. The header can be removed by setting
it to :data:`None`.
"""
self.log_header = True
"""Tells the log book to output or not the header when streaming the
first line or getting its entire string representation. This defaults
:data:`True`.
"""
def record(self, **infos):
"""Enter a record of event in the logbook as a list of key-value pairs.
The informations are appended chronogically to a list as a dictionnary.
When the value part of a pair is a dictionnary, the informations contained
in the dictionnary are recorded in a chapter entitled as the name of the
key part of the pair. Chapters are also Logbook.
"""
for key, value in infos.items():
if isinstance(value, dict):
self.chapters[key].record(**value)
del infos[key]
self.append(infos)
def select(self, *names):
"""Return a list of values associated to the *names* provided
in argument in each dictionary of the Statistics object list.
One list per name is returned in order.
::
>>> log = Logbook()
>>> log.record(gen = 0, mean = 5.4, max = 10.0)
>>> log.record(gen = 1, mean = 9.4, max = 15.0)
>>> log.select("mean")
[5.4, 9.4]
>>> log.select("gen", "max")
([0, 1], [10.0, 15.0])
With a :class:`MultiStatistics` object, the statistics for each
measurement can be retrieved using the :data:`chapters` member :
::
>>> log = Logbook()
>>> log.record(**{'gen' : 0, 'fit' : {'mean' : 0.8, 'max' : 1.5},
... 'size' : {'mean' : 25.4, 'max' : 67}})
>>> log.record(**{'gen' : 1, 'fit' : {'mean' : 0.95, 'max' : 1.7},
... 'size' : {'mean' : 28.1, 'max' : 71}})
>>> log.chapters['size'].select("mean")
[25.4, 28.1]
>>> log.chapters['fit'].select("gen", "max")
([0, 1], [1.5, 1.7])
"""
if len(names) == 1:
return [entry.get(names[0], None) for entry in self]
return tuple([entry.get(name, None) for entry in self] for name in names)
@property
def stream(self):
"""Retrieve the formatted not streamed yet entries of the database
including the headers.
::
>>> log = Logbook()
>>> log.append({'gen' : 0})
>>> print log.stream
gen
0
>>> log.append({'gen' : 1})
>>> print log.stream
1
"""
startindex, self.buffindex = self.buffindex, len(self)
return self.__str__(startindex)
def __delitem__(self, key):
if isinstance(key, slice):
            for i in range(*key.indices(len(self))):
self.pop(i)
for chapter in self.chapters.values():
chapter.pop(i)
else:
self.pop(key)
for chapter in self.chapters.values():
chapter.pop(key)
def pop(self, index=0):
"""Retrieve and delete element *index*. The header and stream will be
adjusted to follow the modification.
        :param index: The index of the element to remove, optional. It defaults
to the first element.
You can also use the following syntax to delete elements.
::
del log[0]
del log[1::5]
"""
if index < self.buffindex:
self.buffindex -= 1
return super(self.__class__, self).pop(index)
def __txt__(self, startindex):
columns = self.header
if not columns:
columns = sorted(self[0].keys()) + sorted(self.chapters.keys())
if not self.columns_len or len(self.columns_len) != len(columns):
self.columns_len = map(len, columns)
chapters_txt = {}
offsets = defaultdict(int)
for name, chapter in self.chapters.items():
chapters_txt[name] = chapter.__txt__(startindex)
if startindex == 0:
offsets[name] = len(chapters_txt[name]) - len(self)
str_matrix = []
for i, line in enumerate(self[startindex:]):
str_line = []
for j, name in enumerate(columns):
if name in chapters_txt:
column = chapters_txt[name][i+offsets[name]]
else:
value = line.get(name, "")
string = "{0:n}" if isinstance(value, float) else "{0}"
column = string.format(value)
self.columns_len[j] = max(self.columns_len[j], len(column))
str_line.append(column)
str_matrix.append(str_line)
if startindex == 0 and self.log_header:
header = []
nlines = 1
if len(self.chapters) > 0:
nlines += max(map(len, chapters_txt.values())) - len(self) + 1
header = [[] for i in xrange(nlines)]
for j, name in enumerate(columns):
if name in chapters_txt:
length = max(len(line.expandtabs()) for line in chapters_txt[name])
blanks = nlines - 2 - offsets[name]
for i in xrange(blanks):
header[i].append(" " * length)
header[blanks].append(name.center(length))
header[blanks+1].append("-" * length)
for i in xrange(offsets[name]):
header[blanks+2+i].append(chapters_txt[name][i])
else:
length = max(len(line[j].expandtabs()) for line in str_matrix)
for line in header[:-1]:
line.append(" " * length)
header[-1].append(name)
str_matrix = chain(header, str_matrix)
template = "\t".join("{%i:<%i}" % (i, l) for i, l in enumerate(self.columns_len))
text = [template.format(*line) for line in str_matrix]
return text
def __str__(self, startindex=0):
text = self.__txt__(startindex)
return "\n".join(text)
class HallOfFame(object):
"""The hall of fame contains the best individual that ever lived in the
population during the evolution. It is lexicographically sorted at all
    times so that the first element of the hall of fame is the individual that
has the best first fitness value ever seen, according to the weights
provided to the fitness at creation time.
    The insertion is made so that old individuals have priority over new
    individuals. A single copy of each individual is kept at all times; the
equivalence between two individuals is made by the operator passed to the
*similar* argument.
    :param maxsize: The maximum number of individuals to keep in the hall of
fame.
:param similar: An equivalence operator between two individuals, optional.
It defaults to operator :func:`operator.eq`.
The class :class:`HallOfFame` provides an interface similar to a list
(without being one completely). It is possible to retrieve its length, to
iterate on it forward and backward and to get an item or a slice from it.
"""
def __init__(self, maxsize, similar=eq):
self.maxsize = maxsize
self.keys = list()
self.items = list()
self.similar = similar
def update(self, population):
"""Update the hall of fame with the *population* by replacing the
worst individuals in it by the best individuals present in
*population* (if they are better). The size of the hall of fame is
kept constant.
        :param population: A list of individuals with a fitness attribute to
update the hall of fame with.
"""
        if len(self) == 0 and self.maxsize != 0:
# Working on an empty hall of fame is problematic for the
# "for else"
self.insert(population[0])
for ind in population:
if ind.fitness > self[-1].fitness or len(self) < self.maxsize:
for hofer in self:
# Loop through the hall of fame to check for any
# similar individual
if self.similar(ind, hofer):
break
else:
# The individual is unique and strictly better than
# the worst
if len(self) >= self.maxsize:
self.remove(-1)
self.insert(ind)
def insert(self, item):
"""Insert a new individual in the hall of fame using the
:func:`~bisect.bisect_right` function. The inserted individual is
inserted on the right side of an equal individual. Inserting a new
individual in the hall of fame also preserve the hall of fame's order.
This method **does not** check for the size of the hall of fame, in a
way that inserting a new individual in a full hall of fame will not
remove the worst individual to maintain a constant size.
:param item: The individual with a fitness attribute to insert in the
hall of fame.
"""
item = deepcopy(item)
i = bisect_right(self.keys, item.fitness)
self.items.insert(len(self) - i, item)
self.keys.insert(i, item.fitness)
def remove(self, index):
"""Remove the specified *index* from the hall of fame.
:param index: An integer giving which item to remove.
"""
del self.keys[len(self) - (index % len(self) + 1)]
del self.items[index]
def clear(self):
"""Clear the hall of fame."""
del self.items[:]
del self.keys[:]
def __len__(self):
return len(self.items)
def __getitem__(self, i):
return self.items[i]
def __iter__(self):
return iter(self.items)
def __reversed__(self):
return reversed(self.items)
def __str__(self):
return str(self.items)
class ParetoFront(HallOfFame):
"""The Pareto front hall of fame contains all the non-dominated individuals
that ever lived in the population. That means that the Pareto front hall of
fame can contain an infinity of different individuals.
    :param similar: A function that tells the Pareto front whether or not two
individuals are similar, optional.
The size of the front may become very large if it is used for example on
a continuous function with a continuous domain. In order to limit the number
of individuals, it is possible to specify a similarity function that will
return :data:`True` if the genotype of two individuals are similar. In that
case only one of the two individuals will be added to the hall of fame. By
default the similarity function is :func:`operator.eq`.
    Since the Pareto front hall of fame inherits from the :class:`HallOfFame`,
it is sorted lexicographically at every moment.
"""
def __init__(self, similar=eq):
HallOfFame.__init__(self, None, similar)
def update(self, population):
"""Update the Pareto front hall of fame with the *population* by adding
the individuals from the population that are not dominated by the hall
of fame. If any individual in the hall of fame is dominated it is
removed.
        :param population: A list of individuals with a fitness attribute to
update the hall of fame with.
"""
for ind in population:
is_dominated = False
has_twin = False
to_remove = []
for i, hofer in enumerate(self): # hofer = hall of famer
if hofer.fitness.dominates(ind.fitness):
is_dominated = True
break
elif ind.fitness.dominates(hofer.fitness):
to_remove.append(i)
elif ind.fitness == hofer.fitness and self.similar(ind, hofer):
has_twin = True
break
for i in reversed(to_remove): # Remove the dominated hofer
self.remove(i)
if not is_dominated and not has_twin:
self.insert(ind)
__all__ = ['HallOfFame', 'ParetoFront', 'History', 'Statistics', 'MultiStatistics', 'Logbook']
if __name__ == "__main__":
import doctest
from operator import itemgetter
import numpy
doctest.run_docstring_examples(Statistics, globals())
doctest.run_docstring_examples(Statistics.register, globals())
doctest.run_docstring_examples(Statistics.compile, globals())
doctest.run_docstring_examples(MultiStatistics, globals())
doctest.run_docstring_examples(MultiStatistics.register, globals())
doctest.run_docstring_examples(MultiStatistics.compile, globals())
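    # --- Illustrative sketch (editor addition, not part of upstream DEAP). ---
    # Shows a Statistics object feeding a Logbook, using only names already
    # imported in this block (itemgetter, numpy); like the rest of this module
    # it targets Python 2.
    demo_stats = Statistics(key=itemgetter(0))
    demo_stats.register("mean", numpy.mean)
    demo_stats.register("max", numpy.max)
    demo_log = Logbook()
    demo_log.header = ("gen", "mean", "max")
    for gen, data in enumerate([[(1.0,), (3.0,)], [(2.0,), (6.0,)]]):
        demo_log.record(gen=gen, **demo_stats.compile(data))
    print(demo_log)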
| gpl-3.0 |
ddboline/kaggle_imdb_sentiment_model | backup/my_word2vec_model.py | 1 | 2182 | #!/usr/bin/python
import os
import pandas as pd
from KaggleWord2VecUtility import KaggleWord2VecUtility
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
import itertools
import nltk.data
# Load the punkt tokenizer
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
def clean_review_function(review):
list_of_sentences = KaggleWord2VecUtility.review_to_sentences(review, tokenizer, remove_stopwords=False)
return list_of_sentences
def my_model(nfeatures=100, run_test_data=False):
print 'nfeatures', nfeatures
labeledtrain_data = pd.read_csv('labeledTrainData.tsv', header=0, delimiter='\t', quoting=3)
unlabeledtrain_data = pd.read_csv('unlabeledTrainData.tsv', header=0, delimiter='\t', quoting=3)
test_data = pd.read_csv('testData.tsv', header=0, delimiter='\t', quoting=3)
print 'labeledtrain_data.shape', labeledtrain_data.shape
print 'unlabeledtrain_data.shape', unlabeledtrain_data.shape
print 'test_data.shape', test_data.shape
sentences = (pd.concat([labeledtrain_data['review'], unlabeledtrain_data['review']])).apply(clean_review_function)
print len(sentences)
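    # Editorial note: each element of `sentences` is itself a list of sentences,
    # while gensim's Word2Vec expects an iterable of sentences (each a list of
    # words). Flattening with the already-imported itertools keeps the training
    # input well-formed; this line is an editor addition to the original script.
    sentences = list(itertools.chain.from_iterable(sentences))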
# Set values for various parameters
num_features = 300 # Word vector dimensionality
min_word_count = 40 # Minimum word count
num_workers = 4 # Number of threads to run in parallel
context = 10 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# Initialize and train the model (this will take some time)
print "Training Word2Vec model..."
model = Word2Vec(sentences, workers=num_workers, \
size=num_features, min_count = min_word_count, \
window = context, sample = downsampling, seed=1)
# If you don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)
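    # Illustrative follow-up (editor note, kept as comments): the trained model
    # could be persisted and queried later, e.g. with a hypothetical file name:
    #   model.save('300features_40minwords_10context')
    #   print model.most_similar('awful')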
if __name__ == '__main__':
nfeatures = 100
for arg in os.sys.argv:
try:
nfeatures = int(arg)
except ValueError:
pass
my_model(nfeatures, run_test_data=True)
| mit |
xguse/scikit-bio | skbio/stats/ordination/tests/test_redundancy_analysis.py | 8 | 6413 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.testing as npt
import pandas as pd
from unittest import TestCase, main
from skbio import OrdinationResults
from skbio.stats.ordination import rda
from skbio.util import get_data_path, assert_ordination_results_equal
class TestRDAErrors(TestCase):
def setUp(self):
pass
def test_shape(self):
for n, p, n_, m in [(3, 4, 2, 1), (3, 4, 3, 10)]:
Y = pd.DataFrame(np.random.randn(n, p))
X = pd.DataFrame(np.random.randn(n_, m))
yield npt.assert_raises, ValueError, rda, Y, X, None, None
class TestRDAResults(TestCase):
# STATUS: L&L only shows results with scaling 1, and they agree
# with vegan's (module multiplying by a constant). I can also
# compute scaling 2, agreeing with vegan, but there are no written
# results in L&L.
def setUp(self):
"""Data from table 11.3 in Legendre & Legendre 1998."""
self.sample_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4',
'Site5', 'Site6', 'Site7', 'Site8', 'Site9']
self.feature_ids = ['Species0', 'Species1', 'Species2', 'Species3',
'Species4', 'Species5']
self.env_ids = map(str, range(4))
self.pc_ids = ['RDA1', 'RDA2', 'RDA3', 'RDA4', 'RDA5', 'RDA6', 'RDA7']
self.Y = pd.DataFrame(
np.loadtxt(get_data_path('example2_Y')),
index=self.sample_ids, columns=self.feature_ids)
self.X = pd.DataFrame(
np.loadtxt(get_data_path('example2_X')),
index=self.sample_ids, columns=self.env_ids)
def test_scaling1(self):
scores = rda(self.Y, self.X, scaling=1)
biplot_scores = pd.DataFrame(np.loadtxt(
get_data_path('example2_biplot_scaling1')))
sample_constraints = pd.DataFrame(np.loadtxt(
get_data_path('example2_sample_constraints_scaling1')))
# Load data as computed with vegan 2.0-8
vegan_features = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_species_scaling1_from_vegan')),
index=self.feature_ids,
columns=self.pc_ids)
vegan_samples = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_site_scaling1_from_vegan')),
index=self.sample_ids,
columns=self.pc_ids)
sample_constraints = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_sample_constraints_scaling1')),
index=self.sample_ids,
columns=self.pc_ids)
biplot_scores = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_biplot_scaling1')))
# These are wrong. See issue #1002
proportion_explained = pd.Series([0.44275783, 0.25614586,
0.15280354, 0.10497021,
0.02873375, 0.00987052,
0.00471828],
index=self.pc_ids)
# These are wrong. See issue #1002
eigvals = pd.Series([25.897954, 14.982578, 8.937841, 6.139956,
1.680705, 0.577350, 0.275984],
index=self.pc_ids)
exp = OrdinationResults(
'RDA', 'Redundancy Analysis',
samples=vegan_samples,
features=vegan_features,
sample_constraints=sample_constraints,
biplot_scores=biplot_scores,
proportion_explained=proportion_explained,
eigvals=eigvals)
assert_ordination_results_equal(scores, exp,
ignore_biplot_scores_labels=True,
decimal=6)
def test_scaling2(self):
scores = rda(self.Y, self.X, scaling=2)
biplot_scores = pd.DataFrame(np.loadtxt(
get_data_path('example2_biplot_scaling2')))
sample_constraints = pd.DataFrame(np.loadtxt(
get_data_path('example2_sample_constraints_scaling2')))
# Load data as computed with vegan 2.0-8
vegan_features = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_species_scaling2_from_vegan')),
index=self.feature_ids,
columns=self.pc_ids)
vegan_samples = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_site_scaling2_from_vegan')),
index=self.sample_ids,
columns=self.pc_ids)
sample_constraints = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_sample_constraints_scaling2')),
index=self.sample_ids,
columns=self.pc_ids)
biplot_scores = pd.DataFrame(
np.loadtxt(get_data_path(
'example2_biplot_scaling2')))
# These are wrong. See issue #1002
proportion_explained = pd.Series([0.44275783, 0.25614586,
0.15280354, 0.10497021,
0.02873375, 0.00987052,
0.00471828],
index=self.pc_ids)
# These are wrong. See issue #1002
eigvals = pd.Series([25.897954, 14.982578, 8.937841, 6.139956,
1.680705, 0.577350, 0.275984],
index=self.pc_ids)
exp = OrdinationResults(
'RDA', 'Redundancy Analysis',
samples=vegan_samples,
features=vegan_features,
sample_constraints=sample_constraints,
biplot_scores=biplot_scores,
proportion_explained=proportion_explained,
eigvals=eigvals)
assert_ordination_results_equal(scores, exp,
ignore_biplot_scores_labels=True,
decimal=6)
if __name__ == '__main__':
main()
| bsd-3-clause |
sandeepkbhat/pylearn2 | pylearn2/models/svm.py | 21 | 3386 | """Wrappers for SVM models."""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import numpy as np
import warnings
try:
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
except ImportError:
warnings.warn("Could not import sklearn.")
class OneVsRestClassifier(object):
"""
Dummy replacement for `sklearn.multiclass.OneVsRestClassifier`.
Parameters
----------
estimator : see `sklearn` doc.
See `sklearn` doc.
Notes
-----
This class is a dummy class included so that sphinx
can import DenseMulticlassSVM and document it even
when sklearn is not installed.
"""
def __init__(self, estimator):
raise RuntimeError("sklearn not available.")
class DenseMulticlassSVM(OneVsRestClassifier):
"""
sklearn does very different things behind the scenes depending
upon the exact identity of the class you use. The only way to
get an SVM implementation that works with dense data is to use
the `SVC` class, which implements one-against-one
classification. This wrapper uses it to implement one-against-
rest classification, which generally works better in my
experiments.
To avoid duplicating the training data, use only numpy ndarrays
    whose tags.c_contiguous flag is true, and which are in float64
format.
Parameters
----------
C : float
SVM regularization parameter.
See SVC.__init__ for details.
kernel : str
Type of kernel to use.
See SVC.__init__ for details.
gamma : float
Optional parameter of kernel.
See SVC.__init__ for details.
coef0 : float
Optional parameter of kernel.
See SVC.__init__ for details.
degree : int
Degree of kernel, if kernel is polynomial.
See SVC.__init__ for details.
"""
def __init__(self, C, kernel='rbf', gamma=1.0, coef0=1.0, degree=3):
estimator = SVC(C=C, kernel=kernel, gamma=gamma, coef0=coef0,
degree=degree)
super(DenseMulticlassSVM, self).__init__(estimator)
def fit(self, X, y):
"""
Fit underlying estimators.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples] or [n_samples, n_classes]
Multi-class targets. An indicator matrix turns on multilabel
classification.
Returns
-------
self
"""
super(DenseMulticlassSVM, self).fit(X, y)
return self
def decision_function(self, X):
"""
Returns the distance of each sample from the decision boundary for
each class.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A 2D ndarray with each row containing the input features for one
example.
Returns
-------
T : array-like, shape = [n_samples, n_classes]
"""
return np.column_stack([estimator.decision_function(X)
for estimator in self.estimators_])
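if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of pylearn2); assumes sklearn is
    # installed and uses a small random dense problem.
    rng = np.random.RandomState(0)
    X = np.ascontiguousarray(rng.randn(20, 5))      # dense, C-contiguous float64, as the docstring advises
    y = rng.randint(0, 3, size=20)
    clf = DenseMulticlassSVM(C=1.0, kernel='linear')
    clf.fit(X, y)
    print(clf.decision_function(X).shape)           # one column per class present in y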
| bsd-3-clause |
c-wilson/klustaviewa | klustaviewa/control/controller.py | 1 | 9136 | """The Controller offers high-level methods to change the data."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import inspect
import numpy as np
import pandas as pd
from klustaviewa.control.processor import Processor
from klustaviewa.control.stack import Stack
from kwiklib.utils import logger as log
from kwiklib.dataio.selection import get_indices, select
from kwiklib.dataio.tools import get_array
# -----------------------------------------------------------------------------
# Utility functions
# -----------------------------------------------------------------------------
def get_pretty_arg(item):
if isinstance(item, (pd.Series)):
if item.size == 0:
return '[]'
elif item.size == 1:
return '[{0:s}]'.format(str(item.values[0]))
else:
return '[{0:s}, ..., {1:s}]'.format(*map(str, item.values[[0, -1]]))
if isinstance(item, (pd.Int64Index, pd.Index)):
if item.size == 0:
return '[]'
elif item.size == 1:
return '[{0:s}]'.format(str(item.values[0]))
else:
return '[{0:s}, ..., {1:s}]'.format(*map(str, item.values[[0, -1]]))
return str(item).replace('\n', '')
def get_pretty_action(method_name, args, kwargs, verb='Process'):
args_str = ', '.join(map(get_pretty_arg, args))
kwargs_str = ', '.join([key + '=' + str(val)
for key, val in kwargs.iteritems()])
if kwargs_str:
kwargs_str = ', ' + kwargs_str
return '{3:s} action {0:s}({1:s}{2:s})'.format(
method_name, args_str, kwargs_str, verb)
def log_action(action, prefix=''):
method_name, args, kwargs = action
description = kwargs.get('_description',
get_pretty_action(*action))
log.info(prefix + description)
def call_action(processor, action, suffix=''):
method_name, args, kwargs = action
kwargs = kwargs.copy()
kwargs.pop('_description', None)
return getattr(processor, method_name + suffix)(*args, **kwargs)
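# For example (illustrative):
#   get_pretty_action('merge_clusters', ([2, 3],), {})
# returns the string 'Process action merge_clusters([2, 3])'.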
# -----------------------------------------------------------------------------
# Controller
# -----------------------------------------------------------------------------
class Controller(object):
"""Implement actions that can be undone and redone.
An Action object is:
(method_name, args, kwargs)
"""
def __init__(self, loader):
self.loader = loader
self.processor = Processor(loader)
# Create the action stack.
self.stack = Stack(maxsize=20)
# Internal action methods.
# ------------------------
def _process(self, method_name, *args, **kwargs):
"""Create, register, and process an action."""
# Create the action.
action = (method_name, args, kwargs)
# Add the action to the stack.
self.stack.add(action)
# Log the action.
log_action(action)
# Process the action.
output = call_action(self.processor, action)
return method_name, output or {}
# Public action methods.
# ----------------------
def merge_clusters(self, clusters):
clusters_to_merge = clusters
cluster_merged = self.loader.get_new_clusters(1)[0]
clusters_old = self.loader.get_clusters(clusters=clusters_to_merge)
cluster_groups = self.loader.get_cluster_groups(clusters_to_merge)
cluster_colors = self.loader.get_cluster_colors(clusters_to_merge)
return self._process('merge_clusters', clusters_old, cluster_groups,
cluster_colors, cluster_merged,
_description='Merged clusters {0:s} into {1:s}'.format(
get_pretty_arg(list(clusters)),
get_pretty_arg(cluster_merged)))
def split_clusters(self, clusters, spikes):
# Old clusters for all spikes to split.
clusters_old = self.loader.get_clusters(spikes=spikes)
assert np.all(np.in1d(clusters_old, clusters))
# Old cluster indices.
cluster_indices_old = np.unique(clusters_old)
nclusters = len(cluster_indices_old)
# New clusters indices.
clusters_indices_new = self.loader.get_new_clusters(nclusters)
# Generate new clusters array.
clusters_new = clusters_old.copy()
# Assign new clusters.
for cluster_old, cluster_new in zip(cluster_indices_old,
clusters_indices_new):
clusters_new[clusters_old == cluster_old] = cluster_new
cluster_groups = self.loader.get_cluster_groups(cluster_indices_old)
cluster_colors = self.loader.get_cluster_colors(cluster_indices_old)
return self._process('split_clusters', clusters,
clusters_old, cluster_groups, cluster_colors, clusters_new,
_description='Split clusters {0:s} into {1:s}'.format(
get_pretty_arg(list(cluster_indices_old)),
get_pretty_arg(list(clusters_indices_new)),
))
def split2_clusters(self, spikes, clusters):
# clusters is new
# Old clusters for all spikes to split.
clusters_old = self.loader.get_clusters(spikes=spikes)
# assert np.all(np.in1d(clusters_old, clusters))
# Old cluster indices.
cluster_indices_old = np.unique(clusters_old)
nclusters = len(cluster_indices_old)
# Renumber output of klustakwik.
clu_idx = np.unique(clusters)
nclusters_new = len(clu_idx)
# Get new clusters indices.
clusters_indices_new = self.loader.get_new_clusters(nclusters_new)
clu_renumber = np.zeros(clu_idx.max() + 1, dtype=np.int32)
clu_renumber[clu_idx] = clusters_indices_new
clusters_new = clu_renumber[clusters]
cluster_groups = self.loader.get_cluster_groups(cluster_indices_old)
cluster_colors = self.loader.get_cluster_colors(cluster_indices_old)
return self._process('split_clusters', get_array(cluster_indices_old),
clusters_old, cluster_groups, cluster_colors, clusters_new,
_description='Split2')
def change_cluster_color(self, cluster, color):
color_old = self.loader.get_cluster_colors(cluster)
color_new = color
clusters_selected = self.loader.get_clusters_selected()
return self._process('change_cluster_color', cluster, color_old,
color_new, clusters_selected,
_description='Changed cluster color of {0:s}'.format(get_pretty_arg(cluster)))
def move_clusters(self, clusters, group):
groups_old = self.loader.get_cluster_groups(clusters)
group_new = group
return self._process('move_clusters', clusters, groups_old, group_new,
_description='Moved clusters {0:s} to {1:s}'.format(
get_pretty_arg(clusters), get_pretty_arg(group)))
def rename_group(self, group, name):
name_old = self.loader.get_group_names(group)
name_new = name
return self._process('rename_group', group, name_old, name_new,
_description='Renamed group {0:s} to {1:s}'.format(
get_pretty_arg(group), get_pretty_arg(name)))
def change_group_color(self, group, color):
color_old = self.loader.get_group_colors(group)
color_new = color
return self._process('change_group_color', group, color_old, color_new,
_description='Changed color of group {0:s}'.format(get_pretty_arg(group)))
def add_group(self, group, name, color):
return self._process('add_group', group, name, color,
_description='Added group {0:s}'.format(get_pretty_arg(name)))
def remove_group(self, group):
name = self.loader.get_group_names(group)
color = self.loader.get_group_colors(group)
return self._process('remove_group', group, name, color,
_description='Removed group {0:s}'.format(get_pretty_arg(group)))
# Stack methods.
# --------------
def undo(self):
"""Undo an action if possible."""
action = self.stack.undo()
if action is None:
return None, None
# Get the undone action.
method_name, args, kwargs = action
# Log the action.
log_action(action, prefix='Undo: ')
# Undo the action.
output = call_action(self.processor, action, suffix='_undo')
return method_name + '_undo', output or {}
def redo(self):
action = self.stack.redo()
if action is None:
return
# Get the redo action.
method_name, args, kwargs = action
# Log the action.
log_action(action, prefix='Redo: ')
# Redo the action.
output = call_action(self.processor, action)
return method_name, output or {}
def can_undo(self):
return self.stack.can_undo()
def can_redo(self):
return self.stack.can_redo()
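# Typical flow (illustrative sketch): every public action goes through _process(),
# which pushes (method_name, args, kwargs) onto the stack before dispatching to the
# Processor, so it can later be replayed:
#
#   controller = Controller(loader)      # loader: an existing data loader instance
#   controller.merge_clusters([2, 3])    # runs Processor.merge_clusters(...)
#   controller.undo()                    # runs Processor.merge_clusters_undo(...)
#   controller.redo()                    # re-applies the merge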
| bsd-3-clause |
timqian/sms-tools | lectures/3-Fourier-properties/plots-code/convolution-1.py | 24 | 1341 | import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
from scipy.fftpack import fft, ifft, fftshift
import math
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import dftModel as DF
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
(fs, x2) = UF.wavread('../../../sounds/impulse-response.wav')
x1 = x[40000:44096]
N = 4096
plt.figure(1, figsize=(9.5, 7))
plt.subplot(3,2,1)
plt.title('x1 (ocean.wav)')
plt.plot(x1, 'b')
plt.axis([0,N,min(x1),max(x1)])
plt.subplot(3,2,2)
plt.title('x2 (impulse-response.wav)')
plt.plot(x2, 'b')
plt.axis([0,N,min(x2),max(x2)])
mX1, pX1 = DF.dftAnal(x1, np.ones(N), N)
mX1 = mX1 - max(mX1)
plt.subplot(3,2,3)
plt.title('X1')
plt.plot(mX1, 'r')
plt.axis([0,N/2,-70,0])
mX2, pX2 = DF.dftAnal(x2, np.ones(N), N)
mX2 = mX2 - max(mX2)
plt.subplot(3,2,4)
plt.title('X2')
plt.plot(mX2, 'r')
plt.axis([0,N/2,-70,0])
y = np.convolve(x1, x2)
mY, pY = DF.dftAnal(y[0:N], np.ones(N), N)
mY = mY - max(mY)
plt.subplot(3,2,5)
plt.title('DFT(x1 * x2)')
plt.plot(mY, 'r')
plt.axis([0,N/2,-70,0])
plt.subplot(3,2,6)
plt.title('X1 x X2')
mY1 = 20*np.log10(np.abs(fft(x1) * fft(x2)))
mY1 = mY1 - max(mY1)
plt.plot(mY1[0:N/2], 'r')
plt.axis([0,N/2,-84,0])
plt.tight_layout()
plt.savefig('convolution-1.png')
plt.show()
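# Illustrative check (not part of the original lecture script; assumes, as the plots
# above do, that x2 also has N samples): zero-padding both signals to 2N samples before
# multiplying their DFTs reproduces the linear convolution computed with np.convolve,
# whereas the N-point product corresponds to circular convolution and only
# approximates X1 x X2.
y_fft = np.real(ifft(fft(x1, 2 * N) * fft(x2, 2 * N)))[:len(y)]
print np.allclose(y, y_fft)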
| agpl-3.0 |
colinsheppard/beam | src/main/python/counts_tools/exec/visualize_validations.py | 2 | 12809 | import ConfigParser
from os import path
import time
import sys
import geopandas as gpd
import matplotlib.ticker
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from utils import spatial_tools
__author__ = "Andrew A Campbell"
'''
Calculates and plots a network performance and demand metrics.
'''
if __name__ == '__main__':
if len(sys.argv) < 2:
        print 'ERROR: need to supply the path to the config file'
        sys.exit(1)
config_path = sys.argv[1]
conf = ConfigParser.ConfigParser()
conf.read(config_path)
# Complete Paths
taz_file = conf.get('Paths', 'taz_file')
county_file = conf.get('Paths', 'county_file')
# Root Dirs
matsim_out_root = conf.get('Paths', 'matsim_out_root')
validation_out_root = conf.get('Paths', 'validation_out_root')
# File Names
comm_val_total_name = conf.get('Paths', 'comm_val_total_name')
comm_val_fwy_name = conf.get('Paths', 'comm_val_fwy_name')
VIZ_1_out_name = conf.get('Paths', 'VIZ_1_out_name')
VIZ_2_time_out_name = conf.get('Paths', 'VIZ_2_time_out_name')
VIZ_3_dist_out_name = conf.get('Paths', 'VIZ_3_dist_out_name')
VIZ_4_tab_out_name = conf.get('Paths', 'VIZ_4_tab_out_name')
VIZ_4_stacked_out_name = conf.get('Paths', 'VIZ_4_stacked_out_name')
VIZ_4_bars_out_name = conf.get('Paths', 'VIZ_4_bars_out_name')
# Build complete paths
validation_out_root = conf.get('Paths', 'validation_out_root')
out_prefix = conf.get('Paths', 'out_prefix')
commute_validation_total_file = path.join(matsim_out_root, out_prefix + comm_val_total_name)
commute_validation_fwy_file = path.join(matsim_out_root, out_prefix + comm_val_fwy_name)
VIZ_1_out_file = path.join(validation_out_root, out_prefix + VIZ_1_out_name)
VIZ_2_time_out_file = path.join(validation_out_root, out_prefix + VIZ_2_time_out_name)
VIZ_3_dist_out_file = path.join(validation_out_root, out_prefix + VIZ_3_dist_out_name)
VIZ_4_tab_out_file = path.join(validation_out_root, out_prefix + VIZ_4_tab_out_name)
VIZ_4_stacked_out_file = path.join(validation_out_root, out_prefix + VIZ_4_stacked_out_name)
VIZ_4_bars_out_file = path.join(validation_out_root, out_prefix + VIZ_4_bars_out_name)
#Params
pop_total_commuters = np.float(conf.get('Params', 'pop_total_commuters')) # for scaling agent commuters up to total
taz_title = conf.get('Params', 'taz_title')
comm_title = conf.get('Params', 'comm_title')
min_time = np.float(conf.get('Params', 'min_time'))
max_time = np.float(conf.get('Params', 'max_time'))
barx_min = np.float(conf.get('Params', 'barx_min'))
barx_max = np.float(conf.get('Params', 'barx_max'))
####################################################################################################################
# VIZ_1 - total commute times by TAZ
####################################################################################################################
# Load the TAZ shapefile and validation files
taz_gdf = gpd.read_file(taz_file)
crs_orig = {'init' :'epsg:26910'}
val_total_gdf = spatial_tools.text_to_points_gdf(commute_validation_total_file, 'HomeX', 'HomeY', sep='\t', crs=crs_orig)
crs_new = {'init' :'epsg:4326'}
val_total_gdf.to_crs(crs_new, inplace=True) # project to WGS84
##
# Spatial join map points to TAZs
##
t0 = time.time()
val_gdf_1 = gpd.sjoin(val_total_gdf, taz_gdf, how='left', op='within')
print 'Method 1 Time: %f' % (time.time() - t0)
##
# Aggregate values to TAZ level
##
g_1 = val_gdf_1.groupby('taz_key')
means_1 = g_1.mean()
means_1['taz_key'] = means_1.index.astype(int)
# join with the geometries
merged_1 = taz_gdf.merge(means_1, how='outer', on='taz_key')
##
# Plots
##
# Total HW commute time
alpha = 1
linewidth = .1
clrmap = 'hot'
merged_1['TotalTimeH2W_Minutes'] = merged_1['TotalTimeH2W'] / 60.0
# If min and max time set, use them, else use the observed min and max from the data
if min_time:
vmin = min_time
vmax = max_time
else:
vmin = np.min(merged_1['TotalTimeH2W_Minutes'])
vmax = np.max(merged_1['TotalTimeH2W_Minutes'])
ax = merged_1.plot('TotalTimeH2W_Minutes', colormap=clrmap, vmin=vmin, vmax=vmax, figsize=(15, 12.5),
linewidth=linewidth, alpha=alpha)
ax.set_title(taz_title, fontsize=20)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
fig = ax.get_figure()
cax = fig.add_axes([0.91, 0.11, 0.03, 0.775])
sm = plt.cm.ScalarMappable(cmap=clrmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
# fake up the array of the scalar mappable. Urgh...
sm._A = []
cb = fig.colorbar(sm, cax=cax, alpha=alpha)
cb.set_label('minutes', fontsize=20)
cb.ax.tick_params(labelsize=15)
# using ColorBase
# cb1 = mpl.colorbar.ColorBase(cax, cmap=sm, orientation='vertical' )
plt.savefig(VIZ_1_out_file)
# plt.show()
# plt.close()
####################################################################################################################
# VIZ_2 and VIZ_3 - freeway commute times and distances tables
####################################################################################################################
# Load the freeway-only validation file
val_fwy_gdf = spatial_tools.text_to_points_gdf(commute_validation_fwy_file, 'HomeX', 'HomeY', sep='\t', crs=crs_orig)
val_fwy_gdf.to_crs(crs_new, inplace=True) # project to WGS84
##
# Spatial join to map points to Counties
##
county_gdf = gpd.read_file(county_file)
##
# Spatial join map points to Counties
##
val_total_gdf_2 = gpd.sjoin(val_total_gdf, county_gdf, how='left', op='within')
val_fwy_gdf_2 = gpd.sjoin(val_fwy_gdf, county_gdf, how='left', op='within')
##
# Aggregate values at County level
##
# Freeway
g_2_fwy = val_fwy_gdf_2.groupby('COUNTY')
means_2_fwy = g_2_fwy.mean()
means_2_fwy['COUNTY'] = means_2_fwy.index
# join with the geometries
merged_2_fwy = county_gdf.merge(means_2_fwy, how='outer', on='COUNTY')
# Totals
g_2_total = val_total_gdf_2.groupby('COUNTY')
means_2_total = g_2_total.mean()
means_2_total['COUNTY'] = means_2_total.index
# join with the geometries
merged_2_total = county_gdf.merge(means_2_total, how='outer', on='COUNTY')
##
# Tables
##
# Times (TotalH2W from totals, others from the fwy data)
comm_times = merged_2_total[['TotalTimeH2W', 'COUNTY']].merge(
merged_2_fwy[['DelayTimeH2W','TimeInCongestionH2W', 'COUNTY']], how='outer', on='COUNTY')
# comm_times_old = merged_2_fwy[['TotalTimeH2W', 'DelayTimeH2W','TimeInCongestionH2W']]
comm_times.index = comm_times['COUNTY']
comm_times.drop('COUNTY', axis=1, inplace=True)
# Totals
s = val_fwy_gdf.mean()[['TotalTimeH2W', 'DelayTimeH2W', 'TimeInCongestionH2W']]
# Replace the TotalTimeH2W with the totals value instead of the fwy value
s['TotalTimeH2W'] = val_total_gdf.mean()['TotalTimeH2W']
s.name = 'TOTALS'
comm_times = comm_times.append(s)
comm_times = (comm_times/60).round(1)
comm_times.sort(inplace=True)
comm_times.index.rename('', inplace=True)
comm_times.to_csv(VIZ_2_time_out_file, sep='\t')
# Distances
comm_dists = merged_2_fwy[['TotalDistH2W', 'DistInCongestionH2W']]
comm_dists.index = merged_2_fwy['COUNTY']
s = val_fwy_gdf.mean()[['TotalDistH2W', 'DistInCongestionH2W']]
s.name = 'TOTALS'
comm_dists = comm_dists.append(s)
comm_dists = (comm_dists/1609.344).round(1)
comm_dists.sort(inplace=True)
comm_dists.index.rename('', inplace=True)
comm_dists.to_csv(VIZ_3_dist_out_file, sep='\t')
####################################################################################################################
# VIZ_4 Commute patterns - horizontal stacked bars
####################################################################################################################
##
# Calculate the county-to-county h2w commute flows
##
# Get home counties from totals gdf
val_gdf = spatial_tools.text_to_points_gdf(commute_validation_total_file, 'HomeX', 'HomeY', sep='\t', crs=crs_orig)
val_gdf.to_crs(crs_new, inplace=True) # project to WGS84
val_gdf_4_home = gpd.sjoin(val_gdf, county_gdf, how='left', op='within')
# val_gdf_4_home.rename(index=str, columns={'COUNTY': 'COUNTY_HOME', 'geometry': 'geometry_home'}, inplace=True)
# Create a geometry column of work locations
x_col, y_col = 'WorkX', 'WorkY'
val_gdf_4_work = spatial_tools.text_to_points_gdf(commute_validation_total_file, x_col, y_col, sep='\t', crs=crs_orig)
val_gdf_4_work.to_crs(crs_new, inplace=True)
val_gdf_4_work = gpd.sjoin(val_gdf_4_work, county_gdf, how='left', op='within')
# Create merged df w/ home and work counties
merged_4 = pd.DataFrame({'COUNTY_HOME': val_gdf_4_home['COUNTY'],
'COUNTY_WORK': val_gdf_4_work['COUNTY'], 'cnt': 1.00})
# # Scale commuter counts up to the population total
# merged_4.cnt = np.true_divide(pop_total_commuters, merged_4.cnt.sum())
# Group by counties and get total counts
g_4 = merged_4.groupby(['COUNTY_HOME', 'COUNTY_WORK'])
# NOTE: we lose about 4% here because they are not mapped to any counties
# Scale commuter counts up to the population totals
commute_patterns = g_4.sum()
commute_patterns.cnt = np.true_divide(pop_total_commuters, commute_patterns.cnt.sum())*commute_patterns.cnt
commute_patterns.cnt = commute_patterns.cnt.round().astype(int)
commute_patterns.to_csv(VIZ_4_stacked_out_file, sep='\t')
cp = commute_patterns.unstack()
cp.to_csv(VIZ_4_tab_out_file, sep='\t')
##
# Build the visualization
##
counties = sorted(cp.index.values) # sorted list of county names
cp.sort(inplace=True)
cp.sort(axis=1, inplace=True)
# Get the widths of each individual bar in the horizontal stacks
widths = []
for i in range(cp.shape[0]):
# row = [-1 * n for n in cp.iloc[i,:].values.tolist()] + cp.iloc[:,i].values.tolist()[::-1]
row = cp.iloc[i, :].values.tolist() + cp.iloc[:, i].values.tolist()[::-1]
row = np.delete(row, [i, len(row) -i - 1]) # delete the self-self flows
widths.append(row)
widths = np.array(widths)
print widths
# Calc left edges of each bar
lefts = []
for i in range(cp.shape[0]):
left = [-1*np.sum(widths[i, 0:widths.shape[1]/2])]
left = np.append(left, left[0] + np.cumsum(widths[i, 0:-1]))
#
# for j in np.arange(widths.shape[1] - 1):
# left.append(left[j] + widths[i, j])
lefts.append(left)
lefts = np.array(lefts)
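    # e.g. (illustrative): a widths row [2, 3, 4, 1] with 2 outbound flows gives left
    # edges [-5, -3, 0, 4], so outbound bars end at 0 and inbound bars start there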
# Define colors for each bar. Skips the colors for self-self flows
cmmap_name = 'Set1'
cmap = plt.get_cmap(cmmap_name)
all_colors = [cmap(i) for i in np.linspace(0, 1, cp.shape[0])]
all_colors = all_colors + all_colors[::-1]
colors = [np.array(all_colors[1:-1])]
for i in range(1, cp.shape[0]):
c_left = all_colors[0:i]
c_mid = all_colors[i+1: -i -1]
c_right = all_colors[-i:]
colors.append(np.array(c_left + c_mid + c_right))
colors = np.array(colors)
# Build the stacked horizontal bar plot
pos = -1*np.arange(cp.shape[0]) - 0.5
fig = plt.figure(figsize=(16, 9))
plts = []
for i in np.arange(widths.shape[1]):
# for i in np.arange(3):
p = plt.barh(pos, widths[:, i], left=lefts[:, i], color=colors[:, i, :], alpha=0.5)
plts.append(p)
# patches = [p[0] for p in plts]
patches = [plts[i].patches[i+1] for i in np.arange(cp.shape[0]-1)]
patches.append(plts[cp.shape[0]].patches[0])
#
# face_colors = [plts[i].patches[i+1].get_facecolor() for i in np.arange(cp.shape[0]-1)]
# face_colors.append(plts[cp.shape[0]-1].patches[0].get_facecolor())
plt.legend(patches[0:9], counties, bbox_to_anchor=(0.0, -0.15, 1.0, .102), loc=3,
ncol=5, mode="expand", borderaxespad=0.0, fontsize=15)
plt.yticks(pos+0.4, counties, fontsize=20)
# Fix the axis is barx_min set
# if barx_min:
# ox = plt.axis()
# plt.axis([barx_min, barx_max, ox[2], ox[3]])
ax = fig.axes[0]
ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, b: format(int(x), ',')))
# ax.set_xticklabels(ax.get_xticklabels(), fontsize=20)
ax.set_title(comm_title, fontsize=25)
plt.setp(ax.get_xticklabels(), fontsize=20)
plt.savefig(VIZ_4_bars_out_file, bbox_inches='tight')
# plt.show()
# plt.close()
| gpl-3.0 |
piskvorky/gensim | gensim/test/test_d2vmodel.py | 2 | 1579 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking D2VTransformer class.
"""
import unittest
import logging
from gensim.sklearn_api import D2VTransformer
from gensim.test.utils import common_texts
class IteratorForIterable:
"""Iterator capable of folding into list."""
def __init__(self, iterable):
self._data = iterable
self._index = 0
def __next__(self):
if len(self._data) > self._index:
result = self._data[self._index]
self._index += 1
return result
raise StopIteration
class IterableWithoutZeroElement:
"""
Iterable, emulating pandas.Series behaviour without 0-th element.
Equivalent to calling `series.index += 1`.
"""
def __init__(self, data):
self.data = data
def __getitem__(self, key):
if key == 0:
raise KeyError("Emulation of absence of item with key 0.")
return self.data[key]
def __iter__(self):
return IteratorForIterable(self.data)
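# For example (illustrative): IterableWithoutZeroElement(['a', 'b'])[1] returns 'b',
# while index 0 raises KeyError, mimicking a pandas.Series whose index was shifted by 1.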
class TestD2VTransformer(unittest.TestCase):
def TestWorksWithIterableNotHavingElementWithZeroIndex(self):
a = IterableWithoutZeroElement(common_texts)
transformer = D2VTransformer(min_count=1, vector_size=5)
transformer.fit(a)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
| lgpl-2.1 |
lancezlin/pylearn2 | pylearn2/cross_validation/tests/test_dataset_iterators.py | 49 | 6535 | """
Test cross-validation dataset iterators.
"""
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_dataset_k_fold():
"""Test DatasetKFold."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'DatasetKFold'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_stratified_dataset_k_fold():
"""Test StratifiedDatasetKFold."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'StratifiedDatasetKFold'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_dataset_shuffle_split():
"""Test DatasetShuffleSplit."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'DatasetShuffleSplit'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_stratified_dataset_shuffle_split():
"""Test StratifiedDatasetShuffleSplit."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'StratifiedDatasetShuffleSplit'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_dataset_validation_k_fold():
"""Test DatasetValidKFold."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'DatasetValidationKFold'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_stratified_dataset_validation_k_fold():
"""Test StratifiedDatasetValidKFold."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'StratifiedDatasetValidationKFold'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_dataset_validation_shuffle_split():
"""Test DatasetValidShuffleSplit."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'DatasetValidationShuffleSplit'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_stratified_dataset_validation_shuffle_split():
"""Test StratifiedDatasetValidShuffleSplit."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'StratifiedDatasetValidationShuffleSplit'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_which_set():
"""Test which_set selector."""
skip_if_no_sklearn()
# one label
this_yaml = test_yaml_which_set % {'which_set': 'train'}
trainer = yaml_parse.load(this_yaml)
trainer.main_loop()
# multiple labels
this_yaml = test_yaml_which_set % {'which_set': ['train', 'test']}
trainer = yaml_parse.load(this_yaml)
trainer.main_loop()
# improper label (iterator only returns 'train' and 'test' subsets)
this_yaml = test_yaml_which_set % {'which_set': 'valid'}
try:
trainer = yaml_parse.load(this_yaml)
trainer.main_loop()
raise AssertionError
except ValueError:
pass
# bogus label (not in approved list)
this_yaml = test_yaml_which_set % {'which_set': 'bogus'}
try:
yaml_parse.load(this_yaml)
raise AssertionError
except ValueError:
pass
def test_no_targets():
"""Test cross-validation without targets."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml_no_targets)
trainer.main_loop()
test_yaml_dataset_iterator = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.%(dataset_iterator)s {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
}
"""
test_yaml_which_set = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
which_set: %(which_set)s,
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
}
"""
test_yaml_no_targets = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 0,
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
}
"""
| bsd-3-clause |
ContinuumIO/dask | dask/dataframe/io/tests/test_sql.py | 1 | 13359 | from contextlib import contextmanager
import io
import pytest
# import dask
from dask.dataframe.io.sql import read_sql_table
from dask.dataframe.utils import assert_eq, PANDAS_GT_0240
from dask.utils import tmpfile
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
pytest.importorskip("sqlalchemy")
pytest.importorskip("sqlite3")
np = pytest.importorskip("numpy")
data = """
name,number,age,negish
Alice,0,33,-5
Bob,1,40,-3
Chris,2,22,3
Dora,3,16,5
Edith,4,53,0
Francis,5,30,0
Garreth,6,20,0
"""
df = pd.read_csv(io.StringIO(data), index_col="number")
@pytest.yield_fixture
def db():
with tmpfile() as f:
uri = "sqlite:///%s" % f
df.to_sql("test", uri, index=True, if_exists="replace")
yield uri
def test_empty(db):
from sqlalchemy import create_engine, MetaData, Table, Column, Integer
with tmpfile() as f:
uri = "sqlite:///%s" % f
metadata = MetaData()
engine = create_engine(uri)
table = Table(
"empty_table",
metadata,
Column("id", Integer, primary_key=True),
Column("col2", Integer),
)
metadata.create_all(engine)
dask_df = read_sql_table(table.name, uri, index_col="id", npartitions=1)
assert dask_df.index.name == "id"
assert dask_df.col2.dtype == np.dtype("int64")
pd_dataframe = dask_df.compute()
assert pd_dataframe.empty is True
@pytest.mark.skip(
reason="Requires a postgres server. Sqlite does not support multiple schemas."
)
def test_empty_other_schema():
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, event, DDL
# Database configurations.
pg_host = "localhost"
pg_port = "5432"
pg_user = "user"
pg_pass = "pass"
pg_db = "db"
db_url = "postgresql://%s:%s@%s:%s/%s" % (pg_user, pg_pass, pg_host, pg_port, pg_db)
# Create an empty table in a different schema.
table_name = "empty_table"
schema_name = "other_schema"
engine = create_engine(db_url)
metadata = MetaData()
table = Table(
table_name,
metadata,
Column("id", Integer, primary_key=True),
Column("col2", Integer),
schema=schema_name,
)
# Create the schema and the table.
event.listen(
metadata, "before_create", DDL("CREATE SCHEMA IF NOT EXISTS %s" % schema_name)
)
metadata.create_all(engine)
# Read the empty table from the other schema.
dask_df = read_sql_table(
table.name, db_url, index_col="id", schema=table.schema, npartitions=1
)
# Validate that the retrieved table is empty.
assert dask_df.index.name == "id"
assert dask_df.col2.dtype == np.dtype("int64")
pd_dataframe = dask_df.compute()
assert pd_dataframe.empty is True
# Drop the schema and the table.
engine.execute("DROP SCHEMA IF EXISTS %s CASCADE" % schema_name)
def test_needs_rational(db):
import datetime
now = datetime.datetime.now()
d = datetime.timedelta(seconds=1)
df = pd.DataFrame(
{
"a": list("ghjkl"),
"b": [now + i * d for i in range(5)],
"c": [True, True, False, True, True],
}
)
df = df.append(
[
{"a": "x", "b": now + d * 1000, "c": None},
{"a": None, "b": now + d * 1001, "c": None},
]
)
with tmpfile() as f:
uri = "sqlite:///%s" % f
df.to_sql("test", uri, index=False, if_exists="replace")
# one partition contains NULL
data = read_sql_table("test", uri, npartitions=2, index_col="b")
df2 = df.set_index("b")
assert_eq(data, df2.astype({"c": bool})) # bools are coerced
# one partition contains NULL, but big enough head
data = read_sql_table("test", uri, npartitions=2, index_col="b", head_rows=12)
df2 = df.set_index("b")
assert_eq(data, df2)
# empty partitions
data = read_sql_table("test", uri, npartitions=20, index_col="b")
part = data.get_partition(12).compute()
assert part.dtypes.tolist() == ["O", bool]
assert part.empty
df2 = df.set_index("b")
assert_eq(data, df2.astype({"c": bool}))
# explicit meta
data = read_sql_table("test", uri, npartitions=2, index_col="b", meta=df2[:0])
part = data.get_partition(1).compute()
assert part.dtypes.tolist() == ["O", "O"]
df2 = df.set_index("b")
assert_eq(data, df2)
def test_simple(db):
# single chunk
data = read_sql_table("test", db, npartitions=2, index_col="number").compute()
assert (data.name == df.name).all()
assert data.index.name == "number"
assert_eq(data, df)
def test_npartitions(db):
data = read_sql_table(
"test", db, columns=list(df.columns), npartitions=2, index_col="number"
)
assert len(data.divisions) == 3
assert (data.name.compute() == df.name).all()
data = read_sql_table(
"test", db, columns=["name"], npartitions=6, index_col="number"
)
assert_eq(data, df[["name"]])
data = read_sql_table(
"test",
db,
columns=list(df.columns),
bytes_per_chunk=2 ** 30,
index_col="number",
)
assert data.npartitions == 1
assert (data.name.compute() == df.name).all()
data_1 = read_sql_table(
"test",
db,
columns=list(df.columns),
bytes_per_chunk=2 ** 30,
index_col="number",
head_rows=1,
)
assert data_1.npartitions == 1
assert (data_1.name.compute() == df.name).all()
data = read_sql_table(
"test",
db,
columns=list(df.columns),
bytes_per_chunk=250,
index_col="number",
head_rows=1,
)
assert data.npartitions == 2
def test_divisions(db):
data = read_sql_table(
"test", db, columns=["name"], divisions=[0, 2, 4], index_col="number"
)
assert data.divisions == (0, 2, 4)
assert data.index.max().compute() == 4
assert_eq(data, df[["name"]][df.index <= 4])
def test_division_or_partition(db):
with pytest.raises(TypeError):
read_sql_table(
"test",
db,
columns=["name"],
index_col="number",
divisions=[0, 2, 4],
npartitions=3,
)
out = read_sql_table("test", db, index_col="number", bytes_per_chunk=100)
m = out.map_partitions(
lambda d: d.memory_usage(deep=True, index=True).sum()
).compute()
assert (50 < m).all() and (m < 200).all()
assert_eq(out, df)
def test_range(db):
data = read_sql_table("test", db, npartitions=2, index_col="number", limits=[1, 4])
assert data.index.min().compute() == 1
assert data.index.max().compute() == 4
def test_datetimes():
import datetime
now = datetime.datetime.now()
d = datetime.timedelta(seconds=1)
df = pd.DataFrame(
{"a": list("ghjkl"), "b": [now + i * d for i in range(2, -3, -1)]}
)
with tmpfile() as f:
uri = "sqlite:///%s" % f
df.to_sql("test", uri, index=False, if_exists="replace")
data = read_sql_table("test", uri, npartitions=2, index_col="b")
assert data.index.dtype.kind == "M"
assert data.divisions[0] == df.b.min()
df2 = df.set_index("b")
assert_eq(data.map_partitions(lambda x: x.sort_index()), df2.sort_index())
def test_with_func(db):
from sqlalchemy import sql
index = sql.func.abs(sql.column("negish")).label("abs")
# function for the index, get all columns
data = read_sql_table("test", db, npartitions=2, index_col=index)
assert data.divisions[0] == 0
part = data.get_partition(0).compute()
assert (part.index == 0).all()
# now an arith op for one column too; it's name will be 'age'
data = read_sql_table(
"test",
db,
npartitions=2,
index_col=index,
columns=[index, -(sql.column("age"))],
)
assert (data.age.compute() < 0).all()
# a column that would have no name, give it a label
index = (-(sql.column("negish"))).label("index")
data = read_sql_table(
"test", db, npartitions=2, index_col=index, columns=["negish", "age"]
)
d = data.compute()
assert (-d.index == d["negish"]).all()
def test_no_nameless_index(db):
from sqlalchemy import sql
index = -(sql.column("negish"))
with pytest.raises(ValueError):
read_sql_table(
"test", db, npartitions=2, index_col=index, columns=["negish", "age", index]
)
index = sql.func.abs(sql.column("negish"))
# function for the index, get all columns
with pytest.raises(ValueError):
read_sql_table("test", db, npartitions=2, index_col=index)
def test_select_from_select(db):
from sqlalchemy import sql
s1 = sql.select([sql.column("number"), sql.column("name")]).select_from(
sql.table("test")
)
out = read_sql_table(s1, db, npartitions=2, index_col="number")
assert_eq(out, df[["name"]])
def test_extra_connection_engine_keywords(capsys, db):
data = read_sql_table(
"test", db, npartitions=2, index_col="number", engine_kwargs={"echo": False}
).compute()
# no captured message from the stdout with the echo=False parameter (this is the default)
out, err = capsys.readouterr()
assert "SELECT" not in out
assert_eq(data, df)
# with the echo=True sqlalchemy parameter, you should get all SQL queries in the stdout
data = read_sql_table(
"test", db, npartitions=2, index_col="number", engine_kwargs={"echo": True}
).compute()
out, err = capsys.readouterr()
assert "WHERE test.number >= ? AND test.number < ?" in out
assert "WHERE test.number >= ? AND test.number <= ?" in out
assert_eq(data, df)
def test_no_character_index_without_divisions(db):
# attempt to read the sql table with a character index and no divisions
with pytest.raises(TypeError):
read_sql_table("test", db, npartitions=2, index_col="name", divisions=None)
@contextmanager
def tmp_db_uri():
with tmpfile() as f:
yield "sqlite:///%s" % f
@pytest.mark.parametrize("npartitions", (1, 2))
@pytest.mark.parametrize("parallel", (False, True))
def test_to_sql(npartitions, parallel):
df_by_age = df.set_index("age")
df_appended = pd.concat([df, df,])
ddf = dd.from_pandas(df, npartitions)
ddf_by_age = ddf.set_index("age")
# Simple round trip test: use existing "number" index_col
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, parallel=parallel)
result = read_sql_table("test", uri, "number")
assert_eq(df, result)
# Test writing no index, and reading back in with one of the other columns as index (`read_sql_table` requires
# an index_col)
with tmp_db_uri() as uri:
ddf.to_sql("test", uri, parallel=parallel, index=False)
result = read_sql_table("test", uri, "negish")
assert_eq(df.set_index("negish"), result)
result = read_sql_table("test", uri, "age")
assert_eq(df_by_age, result)
# Index by "age" instead
with tmp_db_uri() as uri:
ddf_by_age.to_sql("test", uri, parallel=parallel)
result = read_sql_table("test", uri, "age")
assert_eq(df_by_age, result)
# Index column can't have "object" dtype if no partitions are provided
with tmp_db_uri() as uri:
ddf.set_index("name").to_sql("test", uri)
with pytest.raises(
TypeError,
match='Provided index column is of type "object". If divisions is not provided the index column type must be numeric or datetime.', # noqa: E501
):
read_sql_table("test", uri, "name")
# Test various "if_exists" values
with tmp_db_uri() as uri:
ddf.to_sql("test", uri)
# Writing a table that already exists fails
with pytest.raises(ValueError, match="Table 'test' already exists"):
ddf.to_sql("test", uri)
ddf.to_sql("test", uri, parallel=parallel, if_exists="append")
result = read_sql_table("test", uri, "number")
assert_eq(df_appended, result)
ddf_by_age.to_sql("test", uri, parallel=parallel, if_exists="replace")
result = read_sql_table("test", uri, "age")
assert_eq(df_by_age, result)
# Verify number of partitions returned, when compute=False
with tmp_db_uri() as uri:
result = ddf.to_sql("test", uri, parallel=parallel, compute=False)
# the first result is from the "meta" insert
actual = len(result.compute())
assert actual == npartitions
def test_to_sql_kwargs():
ddf = dd.from_pandas(df, 2)
with tmp_db_uri() as uri:
# "method" keyword is allowed iff pandas>=0.24.0
if PANDAS_GT_0240:
ddf.to_sql("test", uri, method="multi")
else:
with pytest.raises(
NotImplementedError,
match=r"'method' requires pandas>=0.24.0. You have version 0.23.\d",
):
ddf.to_sql("test", uri, method="multi")
# Other, unknown keywords always disallowed
with pytest.raises(
TypeError, match="to_sql\\(\\) got an unexpected keyword argument 'unknown'"
):
ddf.to_sql("test", uri, unknown=None)
| bsd-3-clause |
faneshion/MatchZoo | setup.py | 1 | 1863 | import io
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
# Avoids IDE errors, but actual version is read from version.py
__version__ = None
exec(open('matchzoo/version.py').read())
short_description = 'Facilitating the design, comparison and sharing of deep text matching models.'
# Get the long description from the README file
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
install_requires = [
'keras >= 2.3.0',
'nltk >= 3.2.3',
'numpy >= 1.14',
'tqdm >= 4.19.4',
'dill >= 0.2.7.1',
'pandas >= 0.23.1',
'networkx >= 2.1',
'h5py >= 2.8.0',
'hyperopt >= 0.1.1'
]
extras_requires = {
'tests': [
'coverage >= 4.3.4',
'codecov >= 2.0.15',
'pytest >= 3.0.3',
'pytest-cov >= 2.4.0',
'flake8 >= 3.6.0',
'flake8_docstrings >= 1.0.2'],
}
setup(
name="MatchZoo",
version=__version__,
author="Yixing Fan, Bo Wang, Zeyi Wang, Liang Pang, Liu Yang, Qinghua Wang, etc.",
author_email="[email protected]",
description=(short_description),
license="Apache 2.0",
keywords="text matching models",
url="https://github.com/NTMC-Community/MatchZoo",
packages=find_packages(),
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
"Development Status :: 3 - Alpha",
'Environment :: Console',
'Operating System :: POSIX :: Linux',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python :: 3.6'
],
install_requires=install_requires,
extras_require=extras_requires
)
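# Illustrative usage (not part of the file): from the repository root this script is
# typically driven with, e.g.,
#   pip install -e .[tests]   # editable install including the test extras
#   python setup.py sdist     # build a source distribution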
| apache-2.0 |
gsnyder206/mock-surveys | MAST_HLSP/render_script.py | 1 | 8480 |
import astropy
import astropy.io.fits as fits
import matplotlib
import matplotlib.pyplot as pyplot
import os
import scipy
import scipy.ndimage
import scipy.ndimage.interpolation
import congrid #http://scipy-cookbook.readthedocs.io/items/Rebinning.html#Example-3
import make_color_image #written by Gregory Snyder; Also in gsnyder206/synthetic-image-morph
sq_arcsec_per_sr = 42545170296.0
c = 3.0e8
hlsp_dir='/astro/snyder_lab2/Illustris/Lightcones/Lightcone_Catalog_Images/'
def do_hst_illustris(fieldstr,alph,Q,rf,gf,bf):
#in nJy
fielda_f435 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-acs_f435w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
fielda_f606 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-acs_f606w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
fielda_f775 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-acs_f775w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
fielda_f814 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-acs_f814w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
fielda_f850 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-acs_f850lp_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
fielda_f105 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-wfc3_f105w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
fielda_f125 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-wfc3_f125w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
fielda_f140 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-wfc3_f140w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
fielda_f160 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-wfc3_f160w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')['IMAGE_PSF'].data
ra= rf * (0.25*fielda_f105 +0.25*fielda_f125 + 0.25*fielda_f140 + 0.25*fielda_f160)
ga= gf * (0.50*fielda_f850 + 0.50*fielda_f775)
ba= bf * (0.50*fielda_f435 + 0.50*fielda_f606)
gnew=congrid.congrid(ga,ra.shape)*4.0 #preserve surface brightness
bnew=congrid.congrid(ba,ra.shape)*4.0
print(fielda_f435.shape)
print(ra.shape,ga.shape,gnew.shape)
rgb_field = make_color_image.make_interactive_nasa(bnew,gnew,ra,alph,Q)
f1 = pyplot.figure(figsize=(10.0,10.0), dpi=600)
pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0,wspace=0.0,hspace=0.0)
axi=f1.add_subplot(111)
axi.imshow(rgb_field,interpolation='nearest',aspect='auto',origin='lower')
f1.savefig('illustris_render_'+fieldstr+'.pdf',dpi=600)
pyplot.close(f1)
def do_jwst_illustris(fieldstr,alph,Q,rf,gf,bf,x=None,y=None,n=None):
#in nJy
ui='IMAGE_PSF'
fielda_f435 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-acs_f435w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')[ui].data
fielda_f606 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-acs_f606w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')[ui].data
fielda_f775 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_hst-acs_f775w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')[ui].data
fielda_f090 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_jwst-nircam_f090w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')[ui].data
fielda_f115 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_jwst-nircam_f115w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')[ui].data
fielda_f150 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_jwst-nircam_f150w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')[ui].data
fielda_f200 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_jwst-nircam_f200w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')[ui].data
fielda_f277 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_jwst-nircam_f277w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')[ui].data
fielda_f356 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_jwst-nircam_f356w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')[ui].data
fielda_f444 =fits.open(hlsp_dir+'mag30-'+fieldstr+'-11-10/hlsp_misty_illustris_jwst-nircam_f444w_mag30-'+fieldstr+'-11-10_v1_lightcone.fits')[ui].data
ra= rf * (0.50*fielda_f150 + 0.50*fielda_f200)
ga= gf * (0.50*fielda_f090 + 0.50*fielda_f115)
ba= bf * (0.33*fielda_f435 + 0.33*fielda_f606 + 0.33*fielda_f775)
gnew=congrid.congrid(ga,ra.shape)*1.0 #preserve surface brightness
bnew=congrid.congrid(ba,ra.shape)*1.0
print(fielda_f435.shape)
print(ra.shape,ga.shape,gnew.shape)
if n is not None:
rgb_field = make_color_image.make_interactive_nasa(bnew[x:x+n,y:y+n],gnew[x:x+n,y:y+n],ra[x:x+n,y:y+n],alph,Q)
else:
rgb_field = make_color_image.make_interactive_nasa(bnew,gnew,ra,alph,Q)
f1 = pyplot.figure(figsize=(10.0,10.0), dpi=600)
pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0,wspace=0.0,hspace=0.0)
axi=f1.add_subplot(111)
axi.imshow(rgb_field,interpolation='nearest',aspect='auto',origin='lower')
if n is not None:
saven='illustris_jwstzoom_'+fieldstr+'.pdf'
else:
saven='illustris_jwst_'+fieldstr+'.pdf'
f1.savefig(saven,dpi=600)
pyplot.close(f1)
def do_xdf(alph,Q,rf,gf,bf):
xdf_file='/astro/snyder_lab2/Illustris/Illustris_MockUDFs/L75n1820FP/XDF/HST_XDF2012_SunriseUnits_60mas.fits'
xdf_hdus=fits.open(xdf_file)
print(xdf_hdus.info())
pixsize_arcsec=0.06
pixel_Sr = (pixsize_arcsec**2)/sq_arcsec_per_sr
to_nJy_per_Sr_perlambdasqum = (1.0e9)*(1.0e14)/c
to_nJy_per_pix = to_nJy_per_Sr_perlambdasqum*pixel_Sr
dx=200 ; dy=200
xx1=2200+dx
xx2=5042+dx
xy1=2200+dy
xy2=5042+dy
#gives 2.841 arcmin
xdf_f435 = scipy.ndimage.interpolation.rotate( fits.open(xdf_file)['F435W'].data, 42)[xx1:xx2,xy1:xy2] * to_nJy_per_pix * (0.435)**2
xdf_f606 = scipy.ndimage.interpolation.rotate( fits.open(xdf_file)['F606W'].data, 42)[xx1:xx2,xy1:xy2] * to_nJy_per_pix * (0.606)**2
xdf_f775 = scipy.ndimage.interpolation.rotate( fits.open(xdf_file)['F775W'].data, 42)[xx1:xx2,xy1:xy2] * to_nJy_per_pix * (0.775)**2
xdf_f814 = scipy.ndimage.interpolation.rotate( fits.open(xdf_file)['F814W'].data, 42)[xx1:xx2,xy1:xy2] * to_nJy_per_pix * (0.814)**2
xdf_f850 = scipy.ndimage.interpolation.rotate( fits.open(xdf_file)['F850LP'].data, 42)[xx1:xx2,xy1:xy2] * to_nJy_per_pix * (0.850)**2
xdf_f105 = scipy.ndimage.interpolation.rotate( fits.open(xdf_file)['F105W'].data, 42)[xx1:xx2,xy1:xy2] * to_nJy_per_pix * (1.05)**2
xdf_f125 = scipy.ndimage.interpolation.rotate( fits.open(xdf_file)['F125W'].data, 42)[xx1:xx2,xy1:xy2] * to_nJy_per_pix * (1.25)**2
xdf_f140 = scipy.ndimage.interpolation.rotate( fits.open(xdf_file)['F140W'].data, 42)[xx1:xx2,xy1:xy2] * to_nJy_per_pix * (1.40)**2
xdf_f160 = scipy.ndimage.interpolation.rotate( fits.open(xdf_file)['F160W'].data, 42)[xx1:xx2,xy1:xy2] * to_nJy_per_pix * (1.60)**2
print(xdf_f435.shape)
rx= rf * (0.25*xdf_f105 +0.25*xdf_f125 + 0.25*xdf_f140 + 0.25*xdf_f160)
gx= gf * (0.50*xdf_f850 + 0.50*xdf_f775)
bx= bf * (0.50*xdf_f435 + 0.50*xdf_f606)
rgb_xdf = make_color_image.make_interactive_nasa(bx,gx,rx,alph,Q)
f1 = pyplot.figure(figsize=(10.0,10.0), dpi=600)
pyplot.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0,wspace=0.0,hspace=0.0)
axi=f1.add_subplot(111)
axi.imshow(rgb_xdf,interpolation='nearest',aspect='auto',origin='lower')
f1.savefig('xdf_render.pdf',dpi=600)
pyplot.close(f1)
return
if __name__=="__main__":
rf= 1.0
gf= 1.0
bf= 1.0
alph=1.0
Q=6.0
#plot real XDF
#do_xdf(alph,Q,rf,gf,bf)
#Illustris XDF analogue
#do_hst_illustris('fielda',alph,Q,rf,gf,bf)
#do_hst_illustris('fieldb',alph,Q,rf,gf,bf)
#do_hst_illustris('fieldc',alph,Q,rf,gf,bf)
#Illustris HST+ JWST
alph=2.0
Q=7.0
rf=1.3 ; gf=1 ; bf=1.2
#do_jwst_illustris('fielda',alph,Q,rf,gf,bf)
#do_jwst_illustris('fieldb',alph,Q,rf,gf,bf)
#do_jwst_illustris('fieldc',alph,Q,rf,gf,bf)
do_jwst_illustris('fieldc',alph,Q,rf,gf,bf,x=1200,y=4600,n=600)
| mit |
ClimbsRocks/auto_ml | tests/core_tests/api_coverage_tests_classifiers.py | 1 | 17000 | # This file is just to test passing a bunch of different parameters into train to make sure that things work
# At first, it is not necessarily testing whether those things have the intended effect or not
import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
sys.path = [os.path.abspath(os.path.dirname(os.path.dirname(__file__)))] + sys.path
os.environ['is_test_suite'] = 'True'
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
import dill
import numpy as np
from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import utils_testing as utils
def test_perform_feature_selection_false_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, perform_feature_selection=False)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
# For some reason, this test now causes a Segmentation Fault on travis when run on python 3.5.
# home/travis/.travis/job_stages: line 53: 8810 Segmentation fault (core dumped) nosetests -v --with-coverage --cover-package auto_ml tests
# It didn't error previously
# It appears to be an environment issue (possibly caused by running too many parallelized things, which only happens in a test suite), not an issue with auto_ml. So we'll run this test to make sure the library functionality works, but only on some environments
if os.environ.get('TRAVIS_PYTHON_VERSION', '0') != '3.5':
def test_compare_all_models_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, compare_all_models=True)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
def test_perform_feature_selection_true_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, perform_feature_selection=True)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.124
def test_perform_feature_scaling_true_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, perform_feature_scaling=True)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
def test_perform_feature_scaling_false_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, perform_feature_scaling=False)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.14
def test_user_input_func_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
def age_bucketing(data):
def define_buckets(age):
if age <= 17:
return 'youth'
elif age <= 40:
return 'adult'
elif age <= 60:
return 'adult2'
else:
return 'over_60'
if isinstance(data, dict):
data['age_bucket'] = define_buckets(data['age'])
else:
data['age_bucket'] = data.age.apply(define_buckets)
return data
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
, 'age_bucket': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, user_input_func=age_bucketing)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.16
assert lower_bound < first_score < -0.135
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert -0.16 < second_score < -0.135
def test_binary_classification_predict_on_Predictor_instance():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
ml_predictor = utils.train_basic_binary_classifier(df_titanic_train)
predictions = ml_predictor.predict(df_titanic_test)
test_score = accuracy_score(predictions, df_titanic_test.survived)
# Make sure our score is good, but not unreasonably good
print(test_score)
assert .77 < test_score < .805
def test_multilabel_classification_predict_on_Predictor_instance():
np.random.seed(0)
df_twitter_train, df_twitter_test = utils.get_twitter_sentiment_multilabel_classification_dataset()
# Note that this does not take 'text' into account, intentionally (as that takes a while longer to train)
ml_predictor = utils.train_basic_multilabel_classifier(df_twitter_train)
predictions = ml_predictor.predict(df_twitter_test)
test_score = accuracy_score(predictions, df_twitter_test.airline_sentiment)
# Make sure our score is good, but not unreasonably good
print('test_score')
print(test_score)
assert 0.72 < test_score < 0.77
def test_binary_classification_predict_proba_on_Predictor_instance():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
ml_predictor = utils.train_basic_binary_classifier(df_titanic_train)
predictions = ml_predictor.predict_proba(df_titanic_test)
predictions = [pred[1] for pred in predictions]
test_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
# Make sure our score is good, but not unreasonably good
print(test_score)
assert -0.16 < test_score < -0.135
def test_pass_in_list_of_dictionaries_train_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
list_titanic_train = df_titanic_train.to_dict('records')
ml_predictor.train(list_titanic_train)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
def test_pass_in_list_of_dictionaries_predict_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
list_titanic_train = df_titanic_train.to_dict('records')
ml_predictor.train(df_titanic_train)
test_score = ml_predictor.score(df_titanic_test.to_dict('records'), df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
def test_include_bad_y_vals_train_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
df_titanic_train.iloc[1]['survived'] = None
df_titanic_train.iloc[8]['survived'] = None
df_titanic_train.iloc[26]['survived'] = None
ml_predictor.train(df_titanic_train)
test_score = ml_predictor.score(df_titanic_test.to_dict('records'), df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.17 < test_score < -0.135
def test_include_bad_y_vals_predict_classification():
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
df_titanic_test.iloc[1]['survived'] = float('nan')
df_titanic_test.iloc[8]['survived'] = float('inf')
df_titanic_test.iloc[26]['survived'] = None
ml_predictor.train(df_titanic_train)
test_score = ml_predictor.score(df_titanic_test.to_dict('records'), df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
def test_list_of_single_model_name_classification():
np.random.seed(0)
model_name = 'GradientBoostingClassifier'
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, model_names=[model_name])
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
assert -0.16 < test_score < -0.135
if os.environ.get('TRAVIS_PYTHON_VERSION', '0') != '3.5':
def test_getting_single_predictions_nlp_date_multilabel_classification():
np.random.seed(0)
df_twitter_train, df_twitter_test = utils.get_twitter_sentiment_multilabel_classification_dataset()
column_descriptions = {
'airline_sentiment': 'output'
, 'airline': 'categorical'
, 'text': 'nlp'
, 'tweet_location': 'categorical'
, 'user_timezone': 'categorical'
, 'tweet_created': 'date'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_twitter_train)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_twitter_test_dictionaries = df_twitter_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_twitter_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
first_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = 0.73
assert lower_bound < first_score < 0.79
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_twitter_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_twitter_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_twitter_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
print('df_twitter_test_dictionaries')
print(df_twitter_test_dictionaries)
second_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < 0.79
| mit |
hainm/scipy | scipy/special/basic.py | 26 | 64332 | #
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt,
where, mgrid, sin, place, issubdtype, extract,
less, inexact, nan, zeros, atleast_1d, sinc)
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta,
hankel1, hankel2, yv, kv, gammaln, ndtri, errprint, poch,
binom)
from . import specfun
from . import orthogonal
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk',
'erf_zeros', 'erfcinv', 'erfinv', 'errprint', 'euler', 'factorial',
'factorialk', 'factorial2', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
"""Warning that can be issued with ``errprint(True)``"""
pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def diric(x, n):
"""Periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x) = sin(x * n/2) / (n * sin(x / 2)),
where n is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8,8));
>>> for idx, n in enumerate([2,3,4,9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
The following example demonstrates that `diric` gives the magnitudes
(modulo the sign and scaling) of the Fourier coefficients of a
rectangular pulse.
Suppress output of values that are effectively 0:
>>> np.set_printoptions(suppress=True)
Create a signal `x` of length `m` with `k` ones:
>>> m = 8
>>> k = 3
>>> x = np.zeros(m)
>>> x[:k] = 1
Use the FFT to compute the Fourier transform of `x`, and
inspect the magnitudes of the coefficients:
>>> np.abs(np.fft.fft(x))
array([ 3. , 2.41421356, 1. , 0.41421356, 1. ,
0.41421356, 1. , 2.41421356])
Now find the same values (up to sign) using `diric`. We multiply
by `k` to account for the different scaling conventions of
`numpy.fft.fft` and `diric`:
>>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)
>>> k * special.diric(theta, k)
array([ 3. , 2.41421356, 1. , -0.41421356, -1. ,
-0.41421356, 1. , 2.41421356])
We get the exact Fourier transform if we multiply by the phase
factors ``np.exp(-1j*theta)``:
>>> np.fft.fft(x)
array([ 3.00000000+0.j , 1.70710678-1.70710678j,
0.00000000-1.j , 0.29289322+0.29289322j,
1.00000000+0.j , 0.29289322-0.29289322j,
0.00000000+1.j , 1.70710678+1.70710678j])
>>> np.exp(-1j*theta) * k * special.diric(theta, k)
array([ 3.00000000+0.j , 1.70710678-1.70710678j,
0.00000000-1.j , 0.29289322+0.29289322j,
1.00000000+0.j , 0.29289322-0.29289322j,
-0.00000000+1.j , 1.70710678+1.70710678j])
"""
x, n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
def jnjnp_zeros(nt):
"""Compute nt zeros of Bessel functions Jn and Jn'.
Results are arranged in order of the magnitudes of the zeros.
Parameters
----------
nt : int
Number (<=1200) of zeros to compute
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n, m, t, zo = specfun.jdzo(nt)
return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
def jnyn_zeros(n, nt):
"""Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
Returns 4 arrays of length nt, corresponding to the first nt zeros of
Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.
Parameters
----------
n : int
Order of the Bessel functions
nt : int
Number (<=1200) of zeros to compute
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
"""Compute nt zeros of Bessel function Jn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[0]
def jnp_zeros(n, nt):
"""Compute nt zeros of Bessel function derivative Jn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[1]
def yn_zeros(n, nt):
"""Compute nt zeros of Bessel function Yn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[2]
def ynp_zeros(n, nt):
"""Compute nt zeros of Bessel function derivative Yn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[3]
def y0_zeros(nt, complex=0):
"""Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
Parameters
----------
nt : int
Number of zeros to return
complex : int, default 0
Set to 0 to return only the real zeros; set to 1 to return only the
complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z0n : ndarray
Location of nth zero of Y0(z)
y0pz0n : ndarray
Value of derivative Y0'(z0) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = (complex != 1)
return specfun.cyzo(nt, kf, kc)
def y1_zeros(nt, complex=0):
"""Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
Parameters
----------
nt : int
Number of zeros to return
complex : int, default 0
Set to 0 to return only the real zeros; set to 1 to return only the
complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1n : ndarray
Location of nth zero of Y1(z)
y1pz1n : ndarray
Value of derivative Y1'(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = (complex != 1)
return specfun.cyzo(nt, kf, kc)
def y1p_zeros(nt, complex=0):
"""Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
Parameters
----------
nt : int
Number of zeros to return
complex : int, default 0
Set to 0 to return only the real zeros; set to 1 to return only the
complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1pn : ndarray
Location of nth zero of Y1'(z)
y1z1pn : ndarray
Value of derivative Y1(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = (complex != 1)
return specfun.cyzo(nt, kf, kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v,z) = J(v,z), Y(v,z), H1(v,z), H2(v,z), phase = -1
# L(v,z) = I(v,z) or exp(v*pi*i)K(v,z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v, z, n=1):
"""Compute nth derivative of Bessel function Jv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v, z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
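# Illustrative sketch (assuming numpy and scipy.special are importable): for
# n=1 the recurrence above reduces to the classical identity J0'(x) = -J1(x).
#
#   >>> import numpy as np
#   >>> from scipy.special import jv, jvp
#   >>> x = np.linspace(0.5, 5.0, 8)
#   >>> np.allclose(jvp(0, x), -jv(1, x))
#   True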
def yvp(v, z, n=1):
"""Compute nth derivative of Bessel function Yv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v, z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Kv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v, z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
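# Illustrative sketch (assuming scipy.special is importable): the n=1 case of
# the formula above reproduces the standard identity K0'(x) = -K1(x).
#
#   >>> import numpy as np
#   >>> from scipy.special import kv, kvp
#   >>> x = np.linspace(0.5, 5.0, 8)
#   >>> np.allclose(kvp(0, x), -kv(1, x))
#   True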
def ivp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Iv(z) with respect to z.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v, z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
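# Illustrative sketch: similarly, I0'(x) = I1(x), which the n=1 case of the
# recurrence reproduces (again assuming scipy.special is importable).
#
#   >>> import numpy as np
#   >>> from scipy.special import iv, ivp
#   >>> x = np.linspace(0.5, 5.0, 8)
#   >>> np.allclose(ivp(0, x), iv(1, x))
#   True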
def h1vp(v, z, n=1):
"""Compute nth derivative of Hankel function H1v(z) with respect to z.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
"""Compute nth derivative of Hankel function H2v(z) with respect to z.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
def sph_jn(n, z):
"""Compute spherical Bessel function jn(z) and derivative.
This function computes the value and first derivative of jn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of jn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)]
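# Illustrative sketch (hedged): the returned values can be checked against the
# closed forms j0(x) = sin(x)/x and j1(x) = sin(x)/x**2 - cos(x)/x.
#
#   >>> import numpy as np
#   >>> from scipy.special import sph_jn
#   >>> jn, jnp = sph_jn(1, 2.0)
#   >>> np.allclose(jn, [np.sin(2.0)/2.0, np.sin(2.0)/4.0 - np.cos(2.0)/2.0])
#   True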
def sph_yn(n, z):
"""Compute spherical Bessel function yn(z) and derivative.
This function computes the value and first derivative of yn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of yn to compute
z : complex
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
return yn[:(n+1)], ynp[:(n+1)]
def sph_jnyn(n, z):
"""Compute spherical Bessel functions jn(z) and yn(z) and derivatives.
This function computes the value and first derivative of jn(z) and yn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of jn and yn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)], yn[:(n+1)], ynp[:(n+1)]
def sph_in(n, z):
"""Compute spherical Bessel function in(z) and derivative.
This function computes the value and first derivative of in(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of in to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
return In[:(n+1)], Inp[:(n+1)]
def sph_kn(n, z):
"""Compute spherical Bessel function kn(z) and derivative.
This function computes the value and first derivative of kn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of kn to compute
z : complex
Argument at which to evaluate
Returns
-------
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, kn, knp = specfun.sphk(n1, z)
return kn[:(n+1)], knp[:(n+1)]
def sph_inkn(n, z):
"""Compute spherical Bessel functions in(z), kn(z), and derivatives.
This function computes the value and first derivative of in(z) and kn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of in and kn to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
nm, kn, knp = specfun.sphk(n1, z)
return In[:(n+1)], Inp[:(n+1)], kn[:(n+1)], knp[:(n+1)]
def riccati_jn(n, x):
"""Compute Ricatti-Bessel function of the first kind and derivative.
This function computes the value and first derivative of the function for
all orders up to and including n.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(x), ..., jn(x)
jnp : ndarray
First derivative j0'(x), ..., jn'(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rctj(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def riccati_yn(n, x):
"""Compute Ricatti-Bessel function of the second kind and derivative.
This function computes the value and first derivative of the function for
all orders up to and including n.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(x), ..., yn(x)
ynp : ndarray
First derivative y0'(x), ..., yn'(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rcty(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def erfinv(y):
"""Inverse function for erf.
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""Inverse function for erfc.
"""
return -ndtri(0.5*y)/sqrt(2)
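# Illustrative sketch (assuming scipy.special.erf/erfc are available): both
# inverses round-trip with their forward counterparts on (-1, 1) and (0, 2).
#
#   >>> import numpy as np
#   >>> from scipy.special import erf, erfc, erfinv, erfcinv
#   >>> np.allclose(erfinv(erf(0.3)), 0.3)
#   True
#   >>> np.allclose(erfcinv(erfc(0.3)), 0.3)
#   True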
def erf_zeros(nt):
"""Compute nt complex zeros of error function erf(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of cosine Fresnel integral C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of sine Fresnel integral S(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def hyp0f1(v, z):
r"""Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: {}_0F_1(v, z) = \sum_{k=0}^{\infty} \frac{z^k}{(v)_k k!}.
It's also the limit as q -> infinity of ``1F1(q;v;z/q)``, and satisfies
the differential equation :math:`f''(z) + vf'(z) = f(z)`.
"""
v = atleast_1d(v)
z = atleast_1d(z)
v, z = np.broadcast_arrays(v, z)
arg = 2 * sqrt(abs(z))
old_err = np.seterr(all='ignore') # for z=0, a<1 and num=inf, next lines
num = where(z.real >= 0, iv(v - 1, arg), jv(v - 1, arg))
den = abs(z)**((v - 1.0) / 2)
num *= gamma(v)
np.seterr(**old_err)
num[z == 0] = 1
den[z == 0] = 1
return num / den
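# Illustrative sketch (hedged): hyp0f1 can be cross-checked against a
# truncated form of the defining series, using `poch` from this module.
#
#   >>> import numpy as np
#   >>> from math import factorial
#   >>> from scipy.special import hyp0f1, poch
#   >>> v, z = 2.5, 1.5
#   >>> series = sum(z**k / (poch(v, k) * factorial(k)) for k in range(30))
#   >>> np.allclose(hyp0f1(v, z), series)
#   True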
def assoc_laguerre(x, n, k=0.0):
"""Compute nth-order generalized (associated) Laguerre polynomial.
The polynomial :math:`L_n^{(alpha)}(x)` is orthogonal over ``[0, inf)``,
with weighting function ``exp(-x) * x**alpha`` with ``alpha > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
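# Illustrative sketch: the first generalized Laguerre polynomials have simple
# closed forms, e.g. L_0^(k)(x) = 1 and L_1^(k)(x) = 1 + k - x (hedged,
# assuming scipy.special is importable).
#
#   >>> import numpy as np
#   >>> from scipy.special import assoc_laguerre
#   >>> x, k = np.linspace(0.0, 3.0, 7), 0.5
#   >>> np.allclose(assoc_laguerre(x, 1, k), 1.0 + k - x)
#   True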
digamma = psi
def polygamma(n, x):
"""Polygamma function n.
This is the nth derivative of the digamma (psi) function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
return where(n == 0, psi(x), fac2)
def mathieu_even_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the even solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
.. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Ak : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/28.4#i
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m, q)
fc = specfun.fcoef(kd, m, q, a)
return fc[:km]
def mathieu_odd_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the odd solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
.. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
input m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Bk : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m, q)
fc = specfun.fcoef(kd, m, q, b)
return fc[:km]
def lpmn(m, n, z):
"""Associated Legendre function of the first kind, Pmn(z).
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
sv = errprint(0)
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p, pd = specfun.lpmn(mp, n, z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
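# Illustrative sketch (hedged): on (-1, 1) the low orders have closed forms,
# e.g. P_1^0(x) = x and, in the Ferrer/Condon-Shortley convention noted above,
# P_1^1(x) = -sqrt(1 - x**2).
#
#   >>> import numpy as np
#   >>> from scipy.special import lpmn
#   >>> p, pd = lpmn(1, 1, 0.5)
#   >>> np.allclose(p[:, 1], [0.5, -np.sqrt(1 - 0.5**2)])
#   True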
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind, Pmn(z).
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int, optional
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if not(type == 2 or type == 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
sv = errprint(0)
if type == 2:
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
sv = errprint(sv)
else:
mp = m
p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def lqmn(m, n, z):
"""Associated Legendre function of the second kind, Qmn(z).
Computes the associated Legendre function of the second kind of order m and
degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : complex
Input value.
Returns
-------
Qmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Qmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
# Ensure neither m nor n == 0
mm = max(1, m)
nn = max(1, n)
if iscomplex(z):
q, qd = specfun.clqmn(mm, nn, z)
else:
q, qd = specfun.lqmn(mm, nn, z)
return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
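# Illustrative sketch (hedged): for real |z| < 1 the lowest orders reduce to
# Q_0(z) = arctanh(z) and Q_1(z) = z*arctanh(z) - 1.
#
#   >>> import numpy as np
#   >>> from scipy.special import lqmn
#   >>> q, qd = lqmn(0, 1, 0.5)
#   >>> np.allclose(q[0], [np.arctanh(0.5), 0.5*np.arctanh(0.5) - 1.0])
#   True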
def bernoulli(n):
"""Bernoulli numbers B0..Bn (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
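# Illustrative sketch: the first few Bernoulli numbers are B0=1, B1=-1/2,
# B2=1/6, B3=0, B4=-1/30 (hedged doctest-style check).
#
#   >>> import numpy as np
#   >>> from scipy.special import bernoulli
#   >>> np.allclose(bernoulli(4), [1.0, -0.5, 1.0/6, 0.0, -1.0/30])
#   True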
def euler(n):
"""Euler numbers E0..En (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
"""Legendre functions of the first kind, Pn(z).
Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn, pd = specfun.clpn(n1, z)
else:
pn, pd = specfun.lpn(n1, z)
return pn[:(n+1)], pd[:(n+1)]
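# Illustrative sketch: the first Legendre polynomials are P0=1, P1=z and
# P2=(3*z**2 - 1)/2, with derivatives 0, 1 and 3*z (hedged check).
#
#   >>> import numpy as np
#   >>> from scipy.special import lpn
#   >>> pn, pd = lpn(2, 0.5)
#   >>> np.allclose(pn, [1.0, 0.5, (3*0.5**2 - 1)/2]) and np.allclose(pd, [0.0, 1.0, 3*0.5])
#   True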
def lqn(n, z):
"""Legendre functions of the second kind, Qn(z).
Compute sequence of Legendre functions of the second kind, Qn(z) and
derivatives for all degrees from 0 to n (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn, qd = specfun.clqn(n1, z)
else:
qn, qd = specfun.lqnb(n1, z)
return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Ai and its derivative.
Computes the first nt zeros, a, of the Airy function Ai(x); first nt zeros,
a', of the derivative of the Airy function Ai'(x); the corresponding values
Ai(a'); and the corresponding values Ai'(a).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
a : ndarray
First nt zeros of Ai(x)
ap : ndarray
First nt zeros of Ai'(x)
ai : ndarray
Values of Ai(x) evaluated at first nt zeros of Ai'(x)
aip : ndarray
Values of Ai'(x) evaluated at first nt zeros of Ai(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def bi_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Bi and its derivative.
Computes the first nt zeros, b, of the Airy function Bi(x); first nt zeros,
b', of the derivative of the Airy function Bi'(x); the corresponding values
Bi(b'); and the corresponding values Bi'(b).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
b : ndarray
First nt zeros of Bi(x)
bp : ndarray
First nt zeros of Bi'(x)
bi : ndarray
Values of Bi(x) evaluated at first nt zeros of Bi'(x)
bip : ndarray
Values of Bi'(x) evaluated at first nt zeros of Bi(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def lmbda(v, x):
"""Jahnke-Emden Lambda function, Lambdav(x).
Parameters
----------
v : float
Order of the Lambda function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
vl : ndarray
Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dl : ndarray
Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v < 0):
raise ValueError("argument must be > 0.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1, x)
else:
vm, vl, dl = specfun.lamn(v1, x)
return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v, x):
"""Parabolic cylinder functions Dv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbdv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbvv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
"""Parabolic cylinder functions Dn(z) and derivatives.
Parameters
----------
n : int
Order of the parabolic cylinder function
z : complex
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_i(z), for i=0, ..., i=n.
dp : ndarray
Derivatives D_i'(z), for i=0, ..., i=n.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb, cpd = specfun.cpbdn(n1, z)
return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei(x).
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
"""Compute nt zeros of all Kelvin functions.
Returned in a length-8 tuple of arrays of length nt. The tuple contains
the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return (specfun.klvnzo(nt, 1),
specfun.klvnzo(nt, 2),
specfun.klvnzo(nt, 3),
specfun.klvnzo(nt, 4),
specfun.klvnzo(nt, 5),
specfun.klvnzo(nt, 6),
specfun.klvnzo(nt, 7),
specfun.klvnzo(nt, 8))
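# Illustrative sketch (not part of the original module): the tuple returned by
# kelvin_zeros above unpacks in the order documented in its docstring.
def _kelvin_zeros_usage_sketch(nt=5):
    """Return the first zero of ber(x) from a length-``nt`` table."""
    (ber_z, bei_z, ker_z, kei_z,
     berp_z, beip_z, kerp_z, keip_z) = kelvin_zeros(nt)
    # each of the eight arrays has length nt
    return ber_z[0]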
def pro_cv_seq(m, n, c):
"""Characteristic values for prolate spheroidal wave functions.
Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, 1)[1][:maxL]
def obl_cv_seq(m, n, c):
"""Characteristic values for oblate spheroidal wave functions.
Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, -1)[1][:maxL]
def ellipk(m):
"""Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
"""
return ellipkm1(1 - asarray(m))
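# Illustrative sketch (not part of the original module): ellipk is a thin
# wrapper around ellipkm1, and K(0) = pi/2 is a convenient sanity check.
def _ellipk_usage_sketch():
    """Return K(0), which should be close to pi/2 (about 1.5708)."""
    return ellipk(0.0)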
def agm(a, b):
"""Arithmetic, Geometric Mean.
Start with a_0=a and b_0=b and iteratively compute
a_{n+1} = (a_n+b_n)/2
b_{n+1} = sqrt(a_n*b_n)
until a_n=b_n. The result is agm(a,b)
agm(a,b)=agm(b,a)
agm(a,a) = a
min(a,b) < agm(a,b) < max(a,b)
"""
s = a + b + 0.0
return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
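# Illustrative sketch (not part of the original module): exercises the
# properties listed in the agm docstring (symmetry and agm(a, a) = a).
def _agm_usage_sketch(a=24.0, b=6.0):
    """Return (agm(a, b), agm(b, a), agm(a, a)); the first two should agree
    and the third should equal ``a``."""
    return agm(a, b), agm(b, a), agm(a, a)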
def comb(N, k, exact=False, repetition=False):
"""The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, ndarray
The total number of combinations.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import comb
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> comb(n, k, exact=False)
array([ 120., 210.])
>>> comb(10, 3, exact=True)
120L
>>> comb(10, 3, exact=True, repetition=True)
220L
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
N = int(N)
k = int(k)
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in xrange(min(k, N-k)):
val = (val*(N-j))//(j+1)
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import perm
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def factorial(n, exact=False):
"""The factorial function, n! = special.gamma(n+1).
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
- Array argument accepted only for exact=False case.
- If n<0, the return value is 0.
Parameters
----------
n : int or array_like of ints
Calculate ``n!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above. If `exact` is set to True, calculate the
answer exactly using integer arithmetic. Default is False.
Returns
-------
nf : float or int
Factorial of `n`, as an integer or a float depending on `exact`.
Examples
--------
>>> from scipy.special import factorial
>>> arr = np.array([3,4,5])
>>> factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> factorial(5, exact=True)
120L
"""
if exact:
if n < 0:
return 0
val = 1
for k in xrange(1, n+1):
val *= k
return val
else:
n = asarray(n)
vals = gamma(n+1)
return where(n >= 0, vals, 0)
def factorial2(n, exact=False):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
* 3 * 1``. It can be approximated numerically as::
      n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
          = 2**(n/2) * (n/2)!                           n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> from scipy.special import factorial2
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n, 0, -2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape, 'd')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1, n)
evenn = extract(cond2, n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
return vals
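# Illustrative sketch (not part of the original module): for even n the double
# factorial is the product of the even numbers down to 2, e.g. 8!! = 8*6*4*2.
def _factorial2_usage_sketch():
    """Return (factorial2(8, exact=True), 8 * 6 * 4 * 2); both should be 384."""
    return factorial2(8, exact=True), 8 * 6 * 4 * 2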
def factorialk(n, k, exact=True):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
In particular, for any integer ``n``, we have
factorialk(n, 1) = factorial(n)
factorialk(n, 2) = factorial2(n)
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multifactorial of `n`.
Raises
------
NotImplementedError
        Raised when `exact` is False.
Examples
--------
>>> from scipy.special import factorialk
>>> factorialk(5, 1, exact=True)
120L
>>> factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n, 0, -k):
val = val*j
return val
else:
raise NotImplementedError
| bsd-3-clause |
waterponey/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 41 | 2672 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher the error, the less likely the model is to generalize
correctly from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
    # Evaluate the models using cross-validation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
rlong011/trading-with-python | lib/extra.py | 77 | 2540 | '''
Created on Apr 28, 2013
Copyright: Jev Kuznetsov
License: BSD
'''
from __future__ import print_function
import sys
import urllib
import os
import xlrd # module for excel file reading
import pandas as pd
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print('\r', self, end='')
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
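# Illustrative sketch (not part of the original module): typical use of the
# ProgressBar class defined above inside a loop.
def _progress_bar_usage_sketch(n=100):
    pb = ProgressBar(n)
    for i in range(n):
        pb.animate(i)       # redraws the bar in place via '\r'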
def getSpyHoldings(dataDir):
''' get SPY holdings from the net, uses temp data storage to save xls file '''
dest = os.path.join(dataDir,"spy_holdings.xls")
if os.path.exists(dest):
print('File found, skipping download')
else:
print('saving to', dest)
urllib.urlretrieve ("https://www.spdrs.com/site-content/xls/SPY_All_Holdings.xls?fund=SPY&docname=All+Holdings&onyx_code1=1286&onyx_code2=1700",
dest) # download xls file and save it to data directory
# parse
wb = xlrd.open_workbook(dest) # open xls file, create a workbook
sh = wb.sheet_by_index(0) # select first sheet
data = {'name':[], 'symbol':[], 'weight':[],'sector':[]}
for rowNr in range(5,505): # cycle through the rows
v = sh.row_values(rowNr) # get all row values
data['name'].append(v[0])
data['symbol'].append(v[1]) # symbol is in the second column, append it to the list
data['weight'].append(float(v[2]))
data['sector'].append(v[3])
return pd.DataFrame(data)
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/mpl_toolkits/mplot3d/art3d.py | 10 | 25411 | # art3d.py, original mplot3d version by John Porter
# Parts rewritten by Reinier Heeres <[email protected]>
# Minor additions by Ben Axelrod <[email protected]>
'''
Module containing 3D artist code and functions to convert 2D
artists into 3D versions which can be added to an Axes3D.
'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib import lines, text as mtext, path as mpath, colors as mcolors
from matplotlib import artist
from matplotlib.collections import Collection, LineCollection, \
PolyCollection, PatchCollection, PathCollection
from matplotlib.cm import ScalarMappable
from matplotlib.patches import Patch
from matplotlib.colors import Normalize
from matplotlib.cbook import iterable
import warnings
import numpy as np
import math
from . import proj3d
def norm_angle(a):
"""Return angle between -180 and +180"""
a = (a + 360) % 360
if a > 180:
a = a - 360
return a
def norm_text_angle(a):
"""Return angle between -90 and +90"""
a = (a + 180) % 180
if a > 90:
a = a - 180
return a
def get_dir_vector(zdir):
if zdir == 'x':
return np.array((1, 0, 0))
elif zdir == 'y':
return np.array((0, 1, 0))
elif zdir == 'z':
return np.array((0, 0, 1))
elif zdir is None:
return np.array((0, 0, 0))
elif iterable(zdir) and len(zdir) == 3:
return zdir
else:
raise ValueError("'x', 'y', 'z', None or vector of length 3 expected")
class Text3D(mtext.Text):
'''
Text object with 3D position and (in the future) direction.
'''
def __init__(self, x=0, y=0, z=0, text='', zdir='z', **kwargs):
'''
*x*, *y*, *z* Position of text
*text* Text string to display
*zdir* Direction of text
Keyword arguments are passed onto :func:`~matplotlib.text.Text`.
'''
mtext.Text.__init__(self, x, y, text, **kwargs)
self.set_3d_properties(z, zdir)
def set_3d_properties(self, z=0, zdir='z'):
x, y = self.get_position()
self._position3d = np.array((x, y, z))
self._dir_vec = get_dir_vector(zdir)
self.stale = True
def draw(self, renderer):
proj = proj3d.proj_trans_points([self._position3d, \
self._position3d + self._dir_vec], renderer.M)
dx = proj[0][1] - proj[0][0]
dy = proj[1][1] - proj[1][0]
if dx==0. and dy==0.:
# atan2 raises ValueError: math domain error on 0,0
angle = 0.
else:
angle = math.degrees(math.atan2(dy, dx))
self.set_position((proj[0][0], proj[1][0]))
self.set_rotation(norm_text_angle(angle))
mtext.Text.draw(self, renderer)
self.stale = False
def text_2d_to_3d(obj, z=0, zdir='z'):
"""Convert a Text to a Text3D object."""
obj.__class__ = Text3D
obj.set_3d_properties(z, zdir)
class Line3D(lines.Line2D):
'''
3D line object.
'''
def __init__(self, xs, ys, zs, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.lines.Line2D`.
'''
lines.Line2D.__init__(self, [], [], *args, **kwargs)
self._verts3d = xs, ys, zs
def set_3d_properties(self, zs=0, zdir='z'):
xs = self.get_xdata()
ys = self.get_ydata()
try:
# If *zs* is a list or array, then this will fail and
# just proceed to juggle_axes().
zs = float(zs)
zs = [zs for x in xs]
except TypeError:
pass
self._verts3d = juggle_axes(xs, ys, zs, zdir)
self.stale = True
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_data(xs, ys)
lines.Line2D.draw(self, renderer)
self.stale = False
def line_2d_to_3d(line, zs=0, zdir='z'):
'''
Convert a 2D line to 3D.
'''
line.__class__ = Line3D
line.set_3d_properties(zs, zdir)
def path_to_3d_segment(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d
def paths_to_3d_segments(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
for path, pathz in zip(paths, zs):
segments.append(path_to_3d_segment(path, pathz, zdir))
return segments
def path_to_3d_segment_with_codes(path, zs=0, zdir='z'):
'''Convert a path to a 3D segment with path codes.'''
if not iterable(zs):
zs = np.ones(len(path)) * zs
seg = []
codes = []
pathsegs = path.iter_segments(simplify=False, curves=False)
for (((x, y), code), z) in zip(pathsegs, zs):
seg.append((x, y, z))
codes.append(code)
seg3d = [juggle_axes(x, y, z, zdir) for (x, y, z) in seg]
return seg3d, codes
def paths_to_3d_segments_with_codes(paths, zs=0, zdir='z'):
'''
Convert paths from a collection object to 3D segments with path codes.
'''
if not iterable(zs):
zs = np.ones(len(paths)) * zs
segments = []
codes_list = []
for path, pathz in zip(paths, zs):
segs, codes = path_to_3d_segment_with_codes(path, pathz, zdir)
segments.append(segs)
codes_list.append(codes)
return segments, codes_list
class Line3DCollection(LineCollection):
'''
A collection of 3D lines.
'''
def __init__(self, segments, *args, **kwargs):
'''
Keyword arguments are passed onto :func:`~matplotlib.collections.LineCollection`.
'''
LineCollection.__init__(self, segments, *args, **kwargs)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_segments(self, segments):
'''
Set 3D segments
'''
self._segments3d = np.asanyarray(segments)
LineCollection.set_segments(self, [])
def do_3d_projection(self, renderer):
'''
Project the points according to renderer matrix.
'''
xyslist = [
proj3d.proj_trans_points(points, renderer.M) for points in
self._segments3d]
segments_2d = [list(zip(xs, ys)) for (xs, ys, zs) in xyslist]
LineCollection.set_segments(self, segments_2d)
# FIXME
minz = 1e9
for (xs, ys, zs) in xyslist:
minz = min(minz, min(zs))
return minz
def draw(self, renderer, project=False):
if project:
self.do_3d_projection(renderer)
LineCollection.draw(self, renderer)
def line_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a LineCollection to a Line3DCollection object."""
segments3d = paths_to_3d_segments(col.get_paths(), zs, zdir)
col.__class__ = Line3DCollection
col.set_segments(segments3d)
class Patch3D(Patch):
'''
3D patch object.
'''
def __init__(self, *args, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_3d_properties(self, verts, zs=0, zdir='z'):
if not iterable(zs):
zs = np.ones(len(verts)) * zs
self._segment3d = [juggle_axes(x, y, z, zdir) \
for ((x, y), z) in zip(verts, zs)]
self._facecolor3d = Patch.get_facecolor(self)
def get_path(self):
return self._path2d
def get_facecolor(self):
return self._facecolor2d
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)))
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def draw(self, renderer):
Patch.draw(self, renderer)
class PathPatch3D(Patch3D):
'''
3D PathPatch object.
'''
def __init__(self, path, **kwargs):
zs = kwargs.pop('zs', [])
zdir = kwargs.pop('zdir', 'z')
Patch.__init__(self, **kwargs)
self.set_3d_properties(path, zs, zdir)
def set_3d_properties(self, path, zs=0, zdir='z'):
Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)
self._code3d = path.codes
def do_3d_projection(self, renderer):
s = self._segment3d
xs, ys, zs = list(zip(*s))
vxs, vys,vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
self._path2d = mpath.Path(list(zip(vxs, vys)), self._code3d)
# FIXME: coloring
self._facecolor2d = self._facecolor3d
return min(vzs)
def get_patch_verts(patch):
"""Return a list of vertices for the path of a patch."""
trans = patch.get_patch_transform()
path = patch.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
else:
return []
def patch_2d_to_3d(patch, z=0, zdir='z'):
"""Convert a Patch to a Patch3D object."""
verts = get_patch_verts(patch)
patch.__class__ = Patch3D
patch.set_3d_properties(verts, z, zdir)
def pathpatch_2d_to_3d(pathpatch, z=0, zdir='z'):
"""Convert a PathPatch to a PathPatch3D object."""
path = pathpatch.get_path()
trans = pathpatch.get_patch_transform()
mpath = trans.transform_path(path)
pathpatch.__class__ = PathPatch3D
pathpatch.set_3d_properties(mpath, z, zdir)
class Patch3DCollection(PatchCollection):
'''
A collection of 3D patches.
'''
def __init__(self, *args, **kwargs):
"""
        Create a collection of flat 3D patches with their normal vectors
        pointing in the *zdir* direction, located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of patches in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PatchCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PatchCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PatchCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0:
return min(vzs)
else:
return np.nan
class Path3DCollection(PathCollection):
'''
A collection of 3D paths.
'''
def __init__(self, *args, **kwargs):
"""
        Create a collection of flat 3D paths with their normal vectors
        pointing in the *zdir* direction, located at *zs* on the *zdir*
axis. 'zs' can be a scalar or an array-like of the same length as
the number of paths in the collection.
Constructor arguments are the same as for
:class:`~matplotlib.collections.PathCollection`. In addition,
keywords *zs=0* and *zdir='z'* are available.
Also, the keyword argument "depthshade" is available to
indicate whether or not to shade the patches in order to
give the appearance of depth (default is *True*).
This is typically desired in scatter plots.
"""
zs = kwargs.pop('zs', 0)
zdir = kwargs.pop('zdir', 'z')
self._depthshade = kwargs.pop('depthshade', True)
PathCollection.__init__(self, *args, **kwargs)
self.set_3d_properties(zs, zdir)
def set_sort_zpos(self, val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
self.stale = True
def do_3d_projection(self, renderer):
xs, ys, zs = self._offsets3d
vxs, vys, vzs, vis = proj3d.proj_transform_clip(xs, ys, zs, renderer.M)
fcs = (zalpha(self._facecolor3d, vzs) if self._depthshade else
self._facecolor3d)
fcs = mcolors.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = (zalpha(self._edgecolor3d, vzs) if self._depthshade else
self._edgecolor3d)
ecs = mcolors.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
PathCollection.set_offsets(self, list(zip(vxs, vys)))
if vzs.size > 0 :
return min(vzs)
else :
return np.nan
def patch_collection_2d_to_3d(col, zs=0, zdir='z', depthshade=True):
"""
Convert a :class:`~matplotlib.collections.PatchCollection` into a
:class:`Patch3DCollection` object
(or a :class:`~matplotlib.collections.PathCollection` into a
:class:`Path3DCollection` object).
Keywords:
    *zs*            The location or locations to place the patches in the
collection along the *zdir* axis. Defaults to 0.
*zdir* The axis in which to place the patches. Default is "z".
*depthshade* Whether to shade the patches to give a sense of depth.
Defaults to *True*.
"""
if isinstance(col, PathCollection):
col.__class__ = Path3DCollection
elif isinstance(col, PatchCollection):
col.__class__ = Patch3DCollection
col._depthshade = depthshade
col.set_3d_properties(zs, zdir)
class Poly3DCollection(PolyCollection):
'''
A collection of 3D polygons.
'''
def __init__(self, verts, *args, **kwargs):
'''
Create a Poly3DCollection.
*verts* should contain 3D coordinates.
Keyword arguments:
zsort, see set_zsort for options.
Note that this class does a bit of magic with the _facecolors
and _edgecolors properties.
'''
zsort = kwargs.pop('zsort', True)
PolyCollection.__init__(self, verts, *args, **kwargs)
self.set_zsort(zsort)
self._codes3d = None
_zsort_functions = {
'average': np.average,
'min': np.min,
'max': np.max,
}
def set_zsort(self, zsort):
'''
Set z-sorting behaviour:
boolean: if True use default 'average'
string: 'average', 'min' or 'max'
'''
if zsort is True:
zsort = 'average'
if zsort is not False:
if zsort in self._zsort_functions:
zsortfunc = self._zsort_functions[zsort]
else:
return False
else:
zsortfunc = None
self._zsort = zsort
self._sort_zpos = None
self._zsortfunc = zsortfunc
self.stale = True
def get_vector(self, segments3d):
"""Optimize points for projection"""
si = 0
ei = 0
segis = []
points = []
for p in segments3d:
points.extend(p)
ei = si+len(p)
segis.append((si, ei))
si = ei
if len(segments3d) > 0 :
xs, ys, zs = list(zip(*points))
else :
# We need this so that we can skip the bad unpacking from zip()
xs, ys, zs = [], [], []
ones = np.ones(len(xs))
self._vec = np.array([xs, ys, zs, ones])
self._segis = segis
def set_verts(self, verts, closed=True):
'''Set 3D vertices.'''
self.get_vector(verts)
# 2D verts will be updated at draw time
PolyCollection.set_verts(self, [], closed)
def set_verts_and_codes(self, verts, codes):
'''Sets 3D vertices with path codes'''
# set vertices with closed=False to prevent PolyCollection from
# setting path codes
self.set_verts(verts, closed=False)
# and set our own codes instead.
self._codes3d = codes
def set_3d_properties(self):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
self._sort_zpos = None
self.set_zsort(True)
self._facecolors3d = PolyCollection.get_facecolors(self)
self._edgecolors3d = PolyCollection.get_edgecolors(self)
self._alpha3d = PolyCollection.get_alpha(self)
self.stale = True
def set_sort_zpos(self,val):
'''Set the position to use for z-sorting.'''
self._sort_zpos = val
self.stale = True
def do_3d_projection(self, renderer):
'''
Perform the 3D projection for this object.
'''
# FIXME: This may no longer be needed?
if self._A is not None:
self.update_scalarmappable()
self._facecolors3d = self._facecolors
txs, tys, tzs = proj3d.proj_transform_vec(self._vec, renderer.M)
xyzlist = [(txs[si:ei], tys[si:ei], tzs[si:ei])
for si, ei in self._segis]
# This extra fuss is to re-order face / edge colors
cface = self._facecolors3d
cedge = self._edgecolors3d
if len(cface) != len(xyzlist):
cface = cface.repeat(len(xyzlist), axis=0)
if len(cedge) != len(xyzlist):
if len(cedge) == 0:
cedge = cface
else:
cedge = cedge.repeat(len(xyzlist), axis=0)
# if required sort by depth (furthest drawn first)
if self._zsort:
indices = range(len(xyzlist))
z_segments_2d = [(self._zsortfunc(zs), list(zip(xs, ys)), fc, ec,
idx) for (xs, ys, zs), fc, ec, idx in
zip(xyzlist, cface, cedge, indices)]
z_segments_2d.sort(key=lambda x: x[0], reverse=True)
else:
raise ValueError("whoops")
segments_2d = [s for z, s, fc, ec, idx in z_segments_2d]
if self._codes3d is not None:
codes = [self._codes3d[idx] for z, s, fc, ec, idx in z_segments_2d]
PolyCollection.set_verts_and_codes(self, segments_2d, codes)
else:
PolyCollection.set_verts(self, segments_2d)
self._facecolors2d = [fc for z, s, fc, ec, idx in z_segments_2d]
if len(self._edgecolors3d) == len(cface):
self._edgecolors2d = [ec for z, s, fc, ec, idx in z_segments_2d]
else:
self._edgecolors2d = self._edgecolors3d
# Return zorder value
if self._sort_zpos is not None:
zvec = np.array([[0], [0], [self._sort_zpos], [1]])
ztrans = proj3d.proj_transform_vec(zvec, renderer.M)
return ztrans[2][0]
elif tzs.size > 0 :
# FIXME: Some results still don't look quite right.
# In particular, examine contourf3d_demo2.py
# with az = -54 and elev = -45.
return np.min(tzs)
else :
return np.nan
def set_facecolor(self, colors):
PolyCollection.set_facecolor(self, colors)
self._facecolors3d = PolyCollection.get_facecolor(self)
set_facecolors = set_facecolor
def set_edgecolor(self, colors):
PolyCollection.set_edgecolor(self, colors)
self._edgecolors3d = PolyCollection.get_edgecolor(self)
set_edgecolors = set_edgecolor
def set_alpha(self, alpha):
"""
        Set the alpha transparencies of the collection.  *alpha* must be
a float or *None*.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = mcolors.to_rgba_array(
self._facecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
self._edgecolors = mcolors.to_rgba_array(
self._edgecolors3d, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
self.stale = True
def get_facecolors(self):
return self._facecolors2d
get_facecolor = get_facecolors
def get_edgecolors(self):
return self._edgecolors2d
get_edgecolor = get_edgecolors
def draw(self, renderer):
return Collection.draw(self, renderer)
def poly_collection_2d_to_3d(col, zs=0, zdir='z'):
"""Convert a PolyCollection to a Poly3DCollection object."""
segments_3d, codes = paths_to_3d_segments_with_codes(col.get_paths(),
zs, zdir)
col.__class__ = Poly3DCollection
col.set_verts_and_codes(segments_3d, codes)
col.set_3d_properties()
def juggle_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that 2D xs, ys can be plotted in the plane
orthogonal to zdir. zdir is normally x, y or z. However, if zdir
starts with a '-' it is interpreted as a compensation for rotate_axes.
"""
if zdir == 'x':
return zs, xs, ys
elif zdir == 'y':
return xs, zs, ys
elif zdir[0] == '-':
return rotate_axes(xs, ys, zs, zdir)
else:
return xs, ys, zs
def rotate_axes(xs, ys, zs, zdir):
"""
Reorder coordinates so that the axes are rotated with zdir along
the original z axis. Prepending the axis with a '-' does the
inverse transform, so zdir can be x, -x, y, -y, z or -z
"""
if zdir == 'x':
return ys, zs, xs
elif zdir == '-x':
return zs, xs, ys
elif zdir == 'y':
return zs, xs, ys
elif zdir == '-y':
return ys, zs, xs
else:
return xs, ys, zs
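# Illustrative sketch (not part of the original module): juggle_axes reorders a
# triple of coordinate sequences so 2D data can be drawn in the plane
# orthogonal to zdir; e.g. zdir='x' moves the original zs into the first slot.
def _juggle_axes_usage_sketch():
    """Return juggle_axes([1], [2], [3], 'x'), expected to be ([3], [1], [2])."""
    return juggle_axes([1], [2], [3], 'x')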
def iscolor(c):
try:
if len(c) == 4 or len(c) == 3:
if iterable(c[0]):
return False
if hasattr(c[0], '__float__'):
return True
except:
return False
return False
def get_colors(c, num):
"""Stretch the color argument to provide the required number num"""
if type(c) == type("string"):
c = mcolors.to_rgba(c)
if iscolor(c):
return [c] * num
if len(c) == num:
return c
elif iscolor(c):
return [c] * num
elif len(c) == 0: #if edgecolor or facecolor is specified as 'none'
return [[0,0,0,0]] * num
elif iscolor(c[0]):
return [c[0]] * num
else:
raise ValueError('unknown color format %s' % c)
def zalpha(colors, zs):
"""Modify the alphas of the color list according to depth"""
# FIXME: This only works well if the points for *zs* are well-spaced
# in all three dimensions. Otherwise, at certain orientations,
# the min and max zs are very close together.
# Should really normalize against the viewing depth.
colors = get_colors(colors, len(zs))
if zs.size > 0 :
norm = Normalize(min(zs), max(zs))
sats = 1 - norm(zs) * 0.7
colors = [(c[0], c[1], c[2], c[3] * s) for c, s in zip(colors, sats)]
return colors
| bsd-2-clause |
xwolf12/scikit-learn | sklearn/decomposition/dict_learning.py | 104 | 44632 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=False)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
                         % algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary' * X
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
# Transposing product to ensure Fortran ordering
gram = np.dot(dictionary, dictionary.T).T
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
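# Illustrative sketch (not part of scikit-learn): calling sparse_encode against
# a small, row-normalized random dictionary; shapes follow the docstring above.
def _sparse_encode_usage_sketch():
    rng = np.random.RandomState(0)
    dictionary = rng.randn(5, 8)                      # (n_components, n_features)
    dictionary /= np.sqrt((dictionary ** 2).sum(axis=1))[:, np.newaxis]
    X_demo = rng.randn(10, 8)                         # (n_samples, n_features)
    code = sparse_encode(X_demo, dictionary, algorithm='omp',
                         n_nonzero_coefs=2)
    return code.shape                                 # expected: (10, 5)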
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
    # Init the code and the dictionary with SVD of X
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
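# Illustrative sketch (not part of scikit-learn): a minimal call to
# dict_learning on random data, unpacking code/dictionary/errors as documented.
def _dict_learning_usage_sketch():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(20, 8)
    code, dictionary, errors = dict_learning(X_demo, n_components=5, alpha=1,
                                             max_iter=10, random_state=rng)
    return code.shape, dictionary.shape               # expected: (20, 5), (5, 8)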
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
        raise ValueError('Coding method %r not supported as a fit algorithm.'
                         % method)
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
        The dictionary atoms used for sparse coding. Rows are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
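# ---------------------------------------------------------------------------
# A minimal, hedged usage sketch for SparseCoder against a small random
# dictionary. The helper name and the synthetic data are illustrative only,
# and the function is not executed on import.
def _demo_sparse_coder():
    rng = np.random.RandomState(0)
    D = rng.randn(10, 50)                                 # 10 atoms, 50 features
    D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]   # unit-norm rows
    X = rng.randn(5, 50)                                  # 5 signals to encode
    coder = SparseCoder(dictionary=D, transform_algorithm='omp',
                        transform_n_nonzero_coefs=3)
    code = coder.transform(X)                             # shape (5, 10)
    return code                                           # X ~= np.dot(code, D)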
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
verbose :
degree of verbosity of the printed output
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
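# ---------------------------------------------------------------------------
# A minimal, hedged fit/transform sketch for DictionaryLearning; the helper
# name and the synthetic data are illustrative only and nothing runs on import.
def _demo_dictionary_learning():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 8)                       # 20 samples, 8 features
    dico = DictionaryLearning(n_components=5, alpha=1, max_iter=20,
                              random_state=0)
    code = dico.fit(X).transform(X)            # sparse codes, shape (20, 5)
    return code, dico.components_              # learned atoms, shape (5, 8)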
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
verbose :
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
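# ---------------------------------------------------------------------------
# A minimal, hedged sketch of online fitting with partial_fit. The estimator
# keeps inner_stats_ between calls, so the dictionary is refined as new
# mini-batches arrive; names and data are illustrative only.
def _demo_minibatch_partial_fit():
    rng = np.random.RandomState(0)
    dico = MiniBatchDictionaryLearning(n_components=5, alpha=1, n_iter=10,
                                       random_state=0)
    for _ in range(3):                         # simulate a stream of batches
        batch = rng.randn(15, 8)
        dico.partial_fit(batch)
    return dico.components_                    # shape (5, 8)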
| bsd-3-clause |
timsnyder/bokeh | examples/app/movies/main.py | 1 | 4227 | from os.path import dirname, join
import numpy as np
import pandas.io.sql as psql
import sqlite3 as sql
from bokeh.plotting import figure
from bokeh.layouts import layout, column
from bokeh.models import ColumnDataSource, Div
from bokeh.models.widgets import Slider, Select, TextInput
from bokeh.io import curdoc
from bokeh.sampledata.movies_data import movie_path
conn = sql.connect(movie_path)
query = open(join(dirname(__file__), 'query.sql')).read()
movies = psql.read_sql(query, conn)
movies["color"] = np.where(movies["Oscars"] > 0, "orange", "grey")
movies["alpha"] = np.where(movies["Oscars"] > 0, 0.9, 0.25)
movies.fillna(0, inplace=True) # just replace missing values with zero
movies["revenue"] = movies.BoxOffice.apply(lambda x: '{:,d}'.format(int(x)))
with open(join(dirname(__file__), "razzies-clean.csv")) as f:
razzies = f.read().splitlines()
movies.loc[movies.imdbID.isin(razzies), "color"] = "purple"
movies.loc[movies.imdbID.isin(razzies), "alpha"] = 0.9
axis_map = {
"Tomato Meter": "Meter",
"Numeric Rating": "numericRating",
"Number of Reviews": "Reviews",
"Box Office (dollars)": "BoxOffice",
"Length (minutes)": "Runtime",
"Year": "Year",
}
desc = Div(text=open(join(dirname(__file__), "description.html")).read(), sizing_mode="stretch_width")
# Create Input controls
reviews = Slider(title="Minimum number of reviews", value=80, start=10, end=300, step=10)
min_year = Slider(title="Year released", start=1940, end=2014, value=1970, step=1)
max_year = Slider(title="End Year released", start=1940, end=2014, value=2014, step=1)
oscars = Slider(title="Minimum number of Oscar wins", start=0, end=4, value=0, step=1)
boxoffice = Slider(title="Dollars at Box Office (millions)", start=0, end=800, value=0, step=1)
genre = Select(title="Genre", value="All",
options=open(join(dirname(__file__), 'genres.txt')).read().split())
director = TextInput(title="Director name contains")
cast = TextInput(title="Cast names contains")
x_axis = Select(title="X Axis", options=sorted(axis_map.keys()), value="Tomato Meter")
y_axis = Select(title="Y Axis", options=sorted(axis_map.keys()), value="Number of Reviews")
# Create Column Data Source that will be used by the plot
source = ColumnDataSource(data=dict(x=[], y=[], color=[], title=[], year=[], revenue=[], alpha=[]))
TOOLTIPS=[
("Title", "@title"),
("Year", "@year"),
("$", "@revenue")
]
p = figure(plot_height=600, plot_width=700, title="", toolbar_location=None, tooltips=TOOLTIPS, sizing_mode="scale_both")
p.circle(x="x", y="y", source=source, size=7, color="color", line_color=None, fill_alpha="alpha")
def select_movies():
genre_val = genre.value
director_val = director.value.strip()
cast_val = cast.value.strip()
selected = movies[
(movies.Reviews >= reviews.value) &
(movies.BoxOffice >= (boxoffice.value * 1e6)) &
(movies.Year >= min_year.value) &
(movies.Year <= max_year.value) &
(movies.Oscars >= oscars.value)
]
if (genre_val != "All"):
selected = selected[selected.Genre.str.contains(genre_val)==True]
if (director_val != ""):
selected = selected[selected.Director.str.contains(director_val)==True]
if (cast_val != ""):
selected = selected[selected.Cast.str.contains(cast_val)==True]
return selected
def update():
df = select_movies()
x_name = axis_map[x_axis.value]
y_name = axis_map[y_axis.value]
p.xaxis.axis_label = x_axis.value
p.yaxis.axis_label = y_axis.value
p.title.text = "%d movies selected" % len(df)
source.data = dict(
x=df[x_name],
y=df[y_name],
color=df["color"],
title=df["Title"],
year=df["Year"],
revenue=df["revenue"],
alpha=df["alpha"],
)
controls = [reviews, boxoffice, genre, min_year, max_year, oscars, director, cast, x_axis, y_axis]
for control in controls:
control.on_change('value', lambda attr, old, new: update())
inputs = column(*controls, width=320, height=1000)
inputs.sizing_mode = "fixed"
l = layout([
[desc],
[inputs, p],
], sizing_mode="scale_both")
update() # initial load of the data
curdoc().add_root(l)
curdoc().title = "Movies"
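# Usage note (illustrative, not part of the original app): this file is a
# Bokeh server application, so it is typically launched from the parent
# directory with something like
#
#     bokeh serve --show movies
#
# which assumes the Bokeh sample movie database has already been downloaded,
# e.g. via bokeh.sampledata.download().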
| bsd-3-clause |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/skimage/viewer/viewers/core.py | 33 | 13265 | """
ImageViewer class for viewing and interacting with images.
"""
import numpy as np
from ... import io, img_as_float
from ...util.dtype import dtype_range
from ...exposure import rescale_intensity
from ..qt import QtWidgets, Qt, Signal
from ..widgets import Slider
from ..utils import (dialogs, init_qtapp, figimage, start_qtapp,
update_axes_image)
from ..utils.canvas import BlitManager, EventManager
from ..plugins.base import Plugin
__all__ = ['ImageViewer', 'CollectionViewer']
def mpl_image_to_rgba(mpl_image):
"""Return RGB image from the given matplotlib image object.
Each image in a matplotlib figure has its own colormap and normalization
function. Return RGBA (RGB + alpha channel) image with float dtype.
Parameters
----------
mpl_image : matplotlib.image.AxesImage object
The image being converted.
Returns
-------
img : array of float, shape (M, N, 4)
An image of float values in [0, 1].
"""
image = mpl_image.get_array()
if image.ndim == 2:
input_range = (mpl_image.norm.vmin, mpl_image.norm.vmax)
image = rescale_intensity(image, in_range=input_range)
# cmap complains on bool arrays
image = mpl_image.cmap(img_as_float(image))
elif image.ndim == 3 and image.shape[2] == 3:
# add alpha channel if it's missing
image = np.dstack((image, np.ones_like(image)))
return img_as_float(image)
class ImageViewer(QtWidgets.QMainWindow):
"""Viewer for displaying images.
This viewer is a simple container object that holds a Matplotlib axes
for showing images. `ImageViewer` doesn't subclass the Matplotlib axes (or
figure) because of the high probability of name collisions.
Subclasses and plugins will likely extend the `update_image` method to add
custom overlays or filter the displayed image.
Parameters
----------
image : array
Image being viewed.
Attributes
----------
canvas, fig, ax : Matplotlib canvas, figure, and axes
Matplotlib canvas, figure, and axes used to display image.
image : array
Image being viewed. Setting this value will update the displayed frame.
original_image : array
Plugins typically operate on (but don't change) the *original* image.
plugins : list
List of attached plugins.
Examples
--------
>>> from skimage import data
>>> image = data.coins()
>>> viewer = ImageViewer(image) # doctest: +SKIP
>>> viewer.show() # doctest: +SKIP
"""
dock_areas = {'top': Qt.TopDockWidgetArea,
'bottom': Qt.BottomDockWidgetArea,
'left': Qt.LeftDockWidgetArea,
'right': Qt.RightDockWidgetArea}
# Signal that the original image has been changed
original_image_changed = Signal(np.ndarray)
def __init__(self, image, useblit=True):
# Start main loop
init_qtapp()
super(ImageViewer, self).__init__()
#TODO: Add ImageViewer to skimage.io window manager
self.setAttribute(Qt.WA_DeleteOnClose)
self.setWindowTitle("Image Viewer")
self.file_menu = QtWidgets.QMenu('&File', self)
self.file_menu.addAction('Open file', self.open_file,
Qt.CTRL + Qt.Key_O)
self.file_menu.addAction('Save to file', self.save_to_file,
Qt.CTRL + Qt.Key_S)
self.file_menu.addAction('Quit', self.close,
Qt.CTRL + Qt.Key_Q)
self.menuBar().addMenu(self.file_menu)
self.main_widget = QtWidgets.QWidget()
self.setCentralWidget(self.main_widget)
if isinstance(image, Plugin):
plugin = image
image = plugin.filtered_image
plugin.image_changed.connect(self._update_original_image)
# When plugin is started, start
plugin._started.connect(self._show)
self.fig, self.ax = figimage(image)
self.canvas = self.fig.canvas
self.canvas.setParent(self)
self.ax.autoscale(enable=False)
self._tools = []
self.useblit = useblit
if useblit:
self._blit_manager = BlitManager(self.ax)
self._event_manager = EventManager(self.ax)
self._image_plot = self.ax.images[0]
self._update_original_image(image)
self.plugins = []
self.layout = QtWidgets.QVBoxLayout(self.main_widget)
self.layout.addWidget(self.canvas)
status_bar = self.statusBar()
self.status_message = status_bar.showMessage
sb_size = status_bar.sizeHint()
cs_size = self.canvas.sizeHint()
self.resize(cs_size.width(), cs_size.height() + sb_size.height())
self.connect_event('motion_notify_event', self._update_status_bar)
def __add__(self, plugin):
"""Add plugin to ImageViewer"""
plugin.attach(self)
self.original_image_changed.connect(plugin._update_original_image)
if plugin.dock:
location = self.dock_areas[plugin.dock]
dock_location = Qt.DockWidgetArea(location)
dock = QtWidgets.QDockWidget()
dock.setWidget(plugin)
dock.setWindowTitle(plugin.name)
self.addDockWidget(dock_location, dock)
horiz = (self.dock_areas['left'], self.dock_areas['right'])
dimension = 'width' if location in horiz else 'height'
self._add_widget_size(plugin, dimension=dimension)
return self
def _add_widget_size(self, widget, dimension='width'):
widget_size = widget.sizeHint()
viewer_size = self.frameGeometry()
dx = dy = 0
if dimension == 'width':
dx = widget_size.width()
elif dimension == 'height':
dy = widget_size.height()
w = viewer_size.width()
h = viewer_size.height()
self.resize(w + dx, h + dy)
def open_file(self, filename=None):
"""Open image file and display in viewer."""
if filename is None:
filename = dialogs.open_file_dialog()
if filename is None:
return
image = io.imread(filename)
self._update_original_image(image)
def update_image(self, image):
"""Update displayed image.
This method can be overridden or extended in subclasses and plugins to
react to image changes.
"""
self._update_original_image(image)
def _update_original_image(self, image):
self.original_image = image # update saved image
self.image = image.copy() # update displayed image
self.original_image_changed.emit(image)
def save_to_file(self, filename=None):
"""Save current image to file.
The current behavior is not ideal: It saves the image displayed on
screen, so all images will be converted to RGB, and the image size is
not preserved (resizing the viewer window will alter the size of the
saved image).
"""
if filename is None:
filename = dialogs.save_file_dialog()
if filename is None:
return
if len(self.ax.images) == 1:
io.imsave(filename, self.image)
else:
underlay = mpl_image_to_rgba(self.ax.images[0])
overlay = mpl_image_to_rgba(self.ax.images[1])
alpha = overlay[:, :, 3]
# alpha can be set by channel of array or by a scalar value.
# Prefer the alpha channel, but fall back to scalar value.
if np.all(alpha == 1):
alpha = np.ones_like(alpha) * self.ax.images[1].get_alpha()
alpha = alpha[:, :, np.newaxis]
composite = (overlay[:, :, :3] * alpha +
underlay[:, :, :3] * (1 - alpha))
io.imsave(filename, composite)
def closeEvent(self, event):
self.close()
def _show(self, x=0):
self.move(x, 0)
for p in self.plugins:
p.show()
super(ImageViewer, self).show()
self.activateWindow()
self.raise_()
def show(self, main_window=True):
"""Show ImageViewer and attached plugins.
This behaves much like `matplotlib.pyplot.show` and `QWidget.show`.
"""
self._show()
if main_window:
start_qtapp()
return [p.output() for p in self.plugins]
def redraw(self):
if self.useblit:
self._blit_manager.redraw()
else:
self.canvas.draw_idle()
@property
def image(self):
return self._img
@image.setter
def image(self, image):
self._img = image
update_axes_image(self._image_plot, image)
# update display (otherwise image doesn't fill the canvas)
h, w = image.shape[:2]
self.ax.set_xlim(0, w)
self.ax.set_ylim(h, 0)
# update color range
clim = dtype_range[image.dtype.type]
if clim[0] < 0 and image.min() >= 0:
clim = (0, clim[1])
self._image_plot.set_clim(clim)
if self.useblit:
self._blit_manager.background = None
self.redraw()
def reset_image(self):
self.image = self.original_image.copy()
def connect_event(self, event, callback):
"""Connect callback function to matplotlib event and return id."""
cid = self.canvas.mpl_connect(event, callback)
return cid
def disconnect_event(self, callback_id):
"""Disconnect callback by its id (returned by `connect_event`)."""
self.canvas.mpl_disconnect(callback_id)
def _update_status_bar(self, event):
if event.inaxes and event.inaxes.get_navigate():
self.status_message(self._format_coord(event.xdata, event.ydata))
else:
self.status_message('')
def add_tool(self, tool):
if self.useblit:
self._blit_manager.add_artists(tool.artists)
self._tools.append(tool)
self._event_manager.attach(tool)
def remove_tool(self, tool):
if tool not in self._tools:
return
if self.useblit:
self._blit_manager.remove_artists(tool.artists)
self._tools.remove(tool)
self._event_manager.detach(tool)
def _format_coord(self, x, y):
# callback function to format coordinate display in status bar
x = int(x + 0.5)
y = int(y + 0.5)
try:
return "%4s @ [%4s, %4s]" % (self.image[y, x], x, y)
except IndexError:
return ""
class CollectionViewer(ImageViewer):
"""Viewer for displaying image collections.
Select the displayed frame of the image collection using the slider or
with the following keyboard shortcuts:
left/right arrows
Previous/next image in collection.
number keys, 0--9
0% to 90% of collection. For example, "5" goes to the image in the
middle (i.e. 50%) of the collection.
home/end keys
First/last image in collection.
Parameters
----------
image_collection : list of images
List of images to be displayed.
update_on : {'move' | 'release'}
Control whether image is updated on slide or release of the image
        slider. Using 'release' will give smoother behavior when displaying
large images or when writing a plugin/subclass that requires heavy
computation.
"""
def __init__(self, image_collection, update_on='move', **kwargs):
self.image_collection = image_collection
self.index = 0
self.num_images = len(self.image_collection)
first_image = image_collection[0]
super(CollectionViewer, self).__init__(first_image)
slider_kws = dict(value=0, low=0, high=self.num_images - 1)
slider_kws['update_on'] = update_on
slider_kws['callback'] = self.update_index
slider_kws['value_type'] = 'int'
self.slider = Slider('frame', **slider_kws)
self.layout.addWidget(self.slider)
        #TODO: Adjust height to accommodate slider; the following doesn't work
# s_size = self.slider.sizeHint()
# cs_size = self.canvas.sizeHint()
# self.resize(cs_size.width(), cs_size.height() + s_size.height())
def update_index(self, name, index):
"""Select image on display using index into image collection."""
index = int(round(index))
if index == self.index:
return
# clip index value to collection limits
index = max(index, 0)
index = min(index, self.num_images - 1)
self.index = index
self.slider.val = index
self.update_image(self.image_collection[index])
def keyPressEvent(self, event):
if type(event) == QtWidgets.QKeyEvent:
key = event.key()
# Number keys (code: 0 = key 48, 9 = key 57) move to deciles
if 48 <= key < 58:
index = 0.1 * int(key - 48) * self.num_images
self.update_index('', index)
event.accept()
else:
event.ignore()
else:
event.ignore()
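# ---------------------------------------------------------------------------
# A hedged usage sketch mirroring the ImageViewer docstring example, but for
# CollectionViewer; requires a working Qt backend, so it is wrapped in a
# function and not executed on import. Names and images are illustrative only.
def _demo_collection_viewer():
    from skimage import data
    images = [data.coins(), data.camera()]     # any list of images works
    viewer = CollectionViewer(images)
    viewer.show()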
| mit |
vortex-ape/scikit-learn | examples/compose/plot_digits_pipe.py | 2 | 1658 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
# Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs), cv=5)
estimator.fit(X_digits, y_digits)
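# Added illustration (not in the original example): the same '__' convention
# also works with set_params on the pipeline itself, e.g.
#
#     pipe.set_params(pca__n_components=30, logistic__C=1.0)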
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/tests/io/formats/test_printing.py | 7 | 7591 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import compat
import pandas.io.formats.printing as printing
import pandas.io.formats.format as fmt
import pandas.core.config as cf
def test_adjoin():
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert (adjoined == expected)
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = printing.pprint_thing(b, quote_strings=True)
assert res == repr(b)
res = printing.pprint_thing(b, quote_strings=False)
assert res == b
class TestFormattBase(object):
def test_adjoin(self):
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert adjoined == expected
def test_adjoin_unicode(self):
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'], ['ggg', 'hhh', u'いいい']]
expected = u'あ dd ggg\nb ええ hhh\nc ff いいい'
adjoined = printing.adjoin(2, *data)
assert adjoined == expected
adj = fmt.EastAsianTextAdjustment()
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(2, *data)
assert adjoined == expected
cols = adjoined.split('\n')
assert adj.len(cols[0]) == 13
assert adj.len(cols[1]) == 13
assert adj.len(cols[2]) == 16
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(7, *data)
assert adjoined == expected
cols = adjoined.split('\n')
assert adj.len(cols[0]) == 23
assert adj.len(cols[1]) == 23
assert adj.len(cols[2]) == 26
def test_justify(self):
adj = fmt.EastAsianTextAdjustment()
def just(x, *args, **kwargs):
# wrapper to test single str
return adj.justify([x], *args, **kwargs)[0]
assert just('abc', 5, mode='left') == 'abc '
assert just('abc', 5, mode='center') == ' abc '
assert just('abc', 5, mode='right') == ' abc'
assert just(u'abc', 5, mode='left') == 'abc '
assert just(u'abc', 5, mode='center') == ' abc '
assert just(u'abc', 5, mode='right') == ' abc'
assert just(u'パンダ', 5, mode='left') == u'パンダ'
assert just(u'パンダ', 5, mode='center') == u'パンダ'
assert just(u'パンダ', 5, mode='right') == u'パンダ'
assert just(u'パンダ', 10, mode='left') == u'パンダ '
assert just(u'パンダ', 10, mode='center') == u' パンダ '
assert just(u'パンダ', 10, mode='right') == u' パンダ'
def test_east_asian_len(self):
adj = fmt.EastAsianTextAdjustment()
assert adj.len('abc') == 3
assert adj.len(u'abc') == 3
assert adj.len(u'パンダ') == 6
        assert adj.len(u'ﾊﾟﾝﾀﾞ') == 5
assert adj.len(u'パンダpanda') == 11
        assert adj.len(u'ﾊﾟﾝﾀﾞpanda') == 10
def test_ambiguous_width(self):
adj = fmt.EastAsianTextAdjustment()
assert adj.len(u'¡¡ab') == 4
with cf.option_context('display.unicode.ambiguous_as_wide', True):
adj = fmt.EastAsianTextAdjustment()
assert adj.len(u'¡¡ab') == 6
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'],
['ggg', u'¡¡ab', u'いいい']]
expected = u'あ dd ggg \nb ええ ¡¡ab\nc ff いいい'
adjoined = adj.adjoin(2, *data)
assert adjoined == expected
class TestTableSchemaRepr(object):
@classmethod
def setup_class(cls):
pytest.importorskip('IPython')
try:
import mock
except ImportError:
try:
from unittest import mock
except ImportError:
pytest.skip("Mock is not installed")
cls.mock = mock
from IPython.core.interactiveshell import InteractiveShell
cls.display_formatter = InteractiveShell.instance().display_formatter
def test_publishes(self):
df = pd.DataFrame({"A": [1, 2]})
objects = [df['A'], df, df] # dataframe / series
expected_keys = [
{'text/plain', 'application/vnd.dataresource+json'},
{'text/plain', 'text/html', 'application/vnd.dataresource+json'},
]
opt = pd.option_context('display.html.table_schema', True)
for obj, expected in zip(objects, expected_keys):
with opt:
formatted = self.display_formatter.format(obj)
assert set(formatted[0].keys()) == expected
with_latex = pd.option_context('display.latex.repr', True)
with opt, with_latex:
formatted = self.display_formatter.format(obj)
expected = {'text/plain', 'text/html', 'text/latex',
'application/vnd.dataresource+json'}
assert set(formatted[0].keys()) == expected
def test_publishes_not_implemented(self):
# column MultiIndex
# GH 15996
midx = pd.MultiIndex.from_product([['A', 'B'], ['a', 'b', 'c']])
df = pd.DataFrame(np.random.randn(5, len(midx)), columns=midx)
opt = pd.option_context('display.html.table_schema', True)
with opt:
formatted = self.display_formatter.format(df)
expected = {'text/plain', 'text/html'}
assert set(formatted[0].keys()) == expected
def test_config_on(self):
df = pd.DataFrame({"A": [1, 2]})
with pd.option_context("display.html.table_schema", True):
result = df._repr_data_resource_()
assert result is not None
def test_config_default_off(self):
df = pd.DataFrame({"A": [1, 2]})
with pd.option_context("display.html.table_schema", False):
result = df._repr_data_resource_()
assert result is None
def test_enable_data_resource_formatter(self):
# GH 10491
formatters = self.display_formatter.formatters
mimetype = 'application/vnd.dataresource+json'
with pd.option_context('display.html.table_schema', True):
assert 'application/vnd.dataresource+json' in formatters
assert formatters[mimetype].enabled
# still there, just disabled
assert 'application/vnd.dataresource+json' in formatters
assert not formatters[mimetype].enabled
# able to re-set
with pd.option_context('display.html.table_schema', True):
assert 'application/vnd.dataresource+json' in formatters
assert formatters[mimetype].enabled
# smoke test that it works
self.display_formatter.format(cf)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# pytest.skip
# with tm.stdin_encoding(encoding=None):
# result = printing.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
| agpl-3.0 |
av8ramit/tensorflow | tensorflow/contrib/learn/python/learn/estimators/kmeans_test.py | 13 | 20278 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators import kmeans as kmeans_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.fit(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
steps = 10 * self.num_points // self.batch_size
kmeans.fit(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=learn.RunConfig(tf_random_seed=14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.fit(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertNear(self.true_score, score, self.true_score * 0.01)
def _infer_helper(self, kmeans, clusters, num_points):
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
# Test predict
assignments = list(
kmeans.predict_cluster_idx(input_fn=self.input_fn(
batch_size=num_points, points=points, num_epochs=1)))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = kmeans.transform(
input_fn=lambda: (constant_op.constant(points), None))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1,
keepdims=True) - 2 * np.dot(points, np.transpose(clusters)) +
np.transpose(np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.clusters()
# Run inference on small datasets.
self._infer_helper(kmeans, clusters, num_points=10)
self._infer_helper(kmeans, clusters, num_points=1)
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.fit(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.clusters()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.fit(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0, keepdims=True))[
0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0, keepdims=True))[
0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.fit(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.clusters())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = self.kmeans.transform(input_fn=self.input_fn(
batch_size=self.num_points))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.fit(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.clusters())
assignments = list(
self.kmeans.predict_cluster_idx(input_fn=self.input_fn(
num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points), steps=1)
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
    # Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[4:, :], axis=0, keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.fit(input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.clusters())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_idx(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(
input_fn=lambda: (constant_op.constant(points), None), steps=1)
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=run_config.RunConfig(tf_random_seed=3))
tf_kmeans.fit(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.clusters()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None),
steps=1))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.fit(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
biomadeira/pycma | src/__init__.py | 1 | 2165 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2012 F. Madeira and L. Krippahl, 2012
# This code is part of PyCMA distribution.
# GNU General Public License - See LICENSE for more details.
#-----------------------------------------------------------------------
"""
PyCMA - A Python Module for Correlated Mutation Analysis.
"""
"""
This file is part of PyCMA.
pycma is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
pycma is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this file. If not, see <http://www.gnu.org/licenses/>.
"""
#-----------------------------------------------------------------------
__author__ = "F. Madeira ([email protected])"
__all__ = ["pycma", "utils", "version", "test_installation"]
# ensure the user is running the version of python we require
import sys
if not hasattr(sys, "version_info") or sys.version_info < (2,7):
raise RuntimeError("pycoevol requires Python 2.7 or later.")
del sys
# ensure the user has Biopython installed
try:
import Bio
del Bio
except ImportError:
raise RuntimeError("pycoevol requires Biopython 1.5.7 or later.")
# ensure the user has Biopython installed
try:
import numpy
del numpy
except ImportError:
raise RuntimeError("pycoevol requires Numpy 1.5.1 or later.")
# ensure the user has MatplotLib installed
try:
import matplotlib
del matplotlib
except ImportError:
raise RuntimeError("pycoevol requires MatplotLib 1.0.1 or later.")
# imports
from pycma import *
| gpl-3.0 |
dhruv13J/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding atoms of different widths matters, which in
turn motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 0.25)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
blorgon9000/pyopus | pyopus/evaluator/measure.py | 1 | 36720 | # Measurements module
"""
.. inheritance-diagram:: pyopus.evaluator.measure
:parts: 1
**Performance measure extraction module**
All functions in this module do one of the following things:
* return an object of a Python numeric type (int, float, or complex)
* return an n-dimensional array (where n can be 0) of int, float or
complex type
* return None to indicate a failure
* raise an exception to indicate a more severe failure
All signals x(t) are defined in tabular form which means that a signal is
fully defined with two 1-dimensional arrays of values of the same
size. One array is the **scale** which represents the values of the scale (t)
while the other array holds the values of the **signal** (x) corresponding
to the scale points.
"""
from numpy import array, floor, ceil, where, hstack, unique, abs, arctan, pi, NaN, log10
from scipy import angle, unwrap
# import matplotlib.pyplot as plt
__all__ = [ 'Deg2Rad', 'Rad2Deg', 'dB2gain', 'gain2dB', 'XatI', 'IatXval', 'filterI', 'XatIrange',
'dYdI', 'dYdX', 'integYdX', 'DCgain', 'DCswingAtGain', 'ACcircle', 'ACtf', 'ACmag',
'ACphase', 'ACgain' ,'ACbandwidth', 'ACphaseMargin', 'ACgainMargin',
'Tdelay', 'Tshoot', 'Tovershoot', 'Tundershoot', 'TedgeTime', 'TriseTime', 'TfallTime',
'TslewRate', 'TsettlingTime', 'Poverdrive' ]
#------------------------------------------------------------------------------
# Conversions
def Deg2Rad(degrees):
"""
Converts degrees to radians.
"""
return degrees/180.0*pi
def Rad2Deg(radians):
"""
Converts radians to degrees.
"""
return radians*180.0/pi
def dB2gain(db, unit='db20'):
"""
Converts gain magnitude in decibels to gain as a factor.
*unit* is the type of decibels to convert from:
* ``db`` and ``db20`` are equivalent and should be used when the conversion
of voltage/current gain decibels is required (20dB = gain factor of 10.0).
* ``db10`` should be used for power gain decibels conversion
(10dB = gain factor of 10.0)
"""
if unit=='db':
return 10.0**(db/20.0)
elif unit=='db20':
return 10.0**(db/20.0)
elif unit=='db10':
return 10.0**(db/10.0)
else:
raise Exception, "Bad magnitude unit."
def gain2dB(x, unit='db20'):
"""
Converts gain as a factor to magnitude in decibels.
*unit* is the type of decibels to convert to:
* ``db`` and ``db20`` are equivalent and should be used when the conversion
to voltage/current gain decibels is required (gain factor of 10.0 = 20dB).
* ``db10`` should be used for conversion to power gain decibels conversion
(gain factor of 10.0 = 10dB)
"""
if unit=='db':
return 20.0*log10(x)
elif unit=='db20':
return 20.0*log10(x)
elif unit=='db10':
return 10.0*log10(x)
else:
raise Exception, "Bad magnitude unit."
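# Editor's illustration (hedged sketch, not part of the original module): the dB
# helpers above are exact inverses of each other for positive gain factors.
def _example_db_conversions():
    assert abs(dB2gain(20.0, 'db20')-10.0)<1e-9   # 20 dB of voltage gain is a factor of 10
    assert abs(dB2gain(10.0, 'db10')-10.0)<1e-9   # 10 dB of power gain is also a factor of 10
    assert abs(gain2dB(dB2gain(6.0))-6.0)<1e-9    # round trip through both helpers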
#------------------------------------------------------------------------------
# Fractional indexing and cursors
def XatI(x, i):
"""
Returns the value in 1-dimensional array *x* corresponding to fractional
index *i*. This operation is equivalent to a table lookup with linear
interpolation where the first column of the table represents the index (*i)
and the second column holds the components of *x*.
If *i* is a 1-dimensional array the return value is an array of the same
shape holding the results of table lookups corresponding to fractional
indices in array *i*.
*i* must satisfy 0 <= *i* <= x.size-1.
"""
xLen=x.size
xa=array(x)
ia=array(i)
if ia.size==0:
return array([])
if array(i<0).any() or array(i>xLen-1).any():
raise Exception, "Index out of range."
# Interpolate
i1=floor(ia).astype(int)
i2=ceil(ia).astype(int)
frac=ia-i1
xa1=xa[i1]
xa2=xa[i2]
return xa1+(xa2-xa1)*frac
def IatXval(x, val, slope='any'):
"""
Returns a 1-dimensional array of fractional indices corresponding places in
vector *x* where the linearly interpolated value is equal to *val*. These
are the crossings of function f(i)=x(i) with g(i)=val. *slope* specifies
what kind of crossings are returned:
* ``any`` - return all crossings
* ``rising`` - return only crossings where the slope of f(i) is
positive or zero
* ``falling`` - return only crossings where the slope of f(i) is
negative or zero
This function corresponds to a reverse table lookup with linear
interpolation where the first column of the table contains the index and
the second column contains the corresponding values of *x*. The reverse
lookup finds the fractional indices corresponding to *val* in the second
column of the table.
There can be no crossings (empty return array) or more than one crossing
(return array size>1).
The fractional indices are returned in an increasing sequence.
"""
xa=array(x)
val=array(val)
if val.size!=1:
raise Exception, "Value must be a scalar."
if (val<xa.min()) or (val>xa.max()):
return array([], float)
# Detect level crossing
belowOrEqual=(xa<=val)
aboveOrEqual=(xa>=val)
# Detect edges
risingEdge=belowOrEqual[:-1] & aboveOrEqual[1:]
fallingEdge=aboveOrEqual[:-1] & belowOrEqual[1:]
anyEdge=risingEdge | fallingEdge
if slope=='rising':
edge=risingEdge
elif slope=='falling':
edge=fallingEdge
elif slope=='any':
edge=anyEdge
else:
raise Exception, "Bad edge type."
# Get candidate intervals
candidates=where(edge)[0]
# Prepare interval edges and deltas
i1=candidates
i2=candidates+array(1)
x1=xa[i1]
x2=xa[i2]
dx=x2-x1
# Zero delta interval indices
zeroDeltaI=where(dx==0)[0]
nonzeroDeltaI=where(dx!=0)[0]
# Handle zero delta intervals
ii=hstack((i1[zeroDeltaI], i2[zeroDeltaI]))
# Handle nonzero delta intervals
ii=hstack((ii, 1.0/dx[nonzeroDeltaI]*(val-x1[nonzeroDeltaI])+i1[nonzeroDeltaI]))
return unique(ii)
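# Editor's sketch (hedged, not part of the original module): XatI() and IatXval()
# on a small triangle signal; the expected values follow from linear interpolation.
def _example_fractional_index():
    x=array([0.0, 1.0, 2.0, 1.0, 0.0])
    assert abs(XatI(x, 1.5)-1.5)<1e-12        # halfway between x[1]=1 and x[2]=2
    crossings=IatXval(x, 0.5, 'any')          # level 0.5 is crossed on the way up and down
    assert abs(crossings[0]-0.5)<1e-12
    assert abs(crossings[-1]-3.5)<1e-12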
def filterI(i, direction='right', start=None, includeStart=False):
"""
Returns a 1-dimensional array of fractional indices obtained by processing
fractional indices given in *i*.
If *direction* is ``right`` *i* is traversed from lower to higher indices.
Only the indices from *i* that are greater than *start* are included in the
return value.
If *direction* is ``left`` *i* is traversed from higher to lower indices.
Only the indices from *i* that are less than *start* are included in the
return value.
The filtered indices are returned in the same order as they appear in *i*.
If *includeStart* is ``True`` the *greater than* and *less than* comparison
operators are replaced by *greater than or equal to* and *less than or
equal to*. This includes indices which are equal to *start* in the return
value.
If *start* is not given, it defaults to ``i[0]`` if *direction* is
``right`` and ``i[-1]`` if *direction* is ``left``.
"""
if start is None:
if direction=='right':
if i.size>0:
start=i[0]
else:
start=0
elif direction=='left':
if i.size>0:
start=i[-1]
else:
start=0
else:
raise Exception, "Bad direction."
if direction=='right':
if includeStart:
selector=i>=start
else:
selector=i>start
elif direction=='left':
if includeStart:
selector=i<=start
else:
selector=i<start
else:
raise Exception, "Bad direction."
return i[where(selector)[0]]
def XatIrange(x, i1, i2=None):
"""
Returns a subvector (1-dimensional array) of a vector given by
1-dimensional array *x*. The endpoints of the subvector correspond to
fractional indices *i1* and *i2*.
If *i2* is not given the return value is the same as the return value of
``XatI(x, i1)``.
*i1* and *i2* must satisfy
* 0 <= *i1* <= x.size-1
* 0 <= *i2* <= x.size-1
* *i1* <= *i2*
If the endpoints do not correspond to integer indices the subvector
endpoints are obtained with linear interpolation (see :func:`XatI`
function).
"""
xLen=x.size
if (i1<0) or (i1>xLen-1):
raise Exception, "Bad fractional index (i1)."
if i2 is None:
return XatI(x, i1)
if (i2<i1) or (i2<0) or (i2>xLen-1):
raise Exception, "Bad fractional index range."
if i1==i2:
return XatI(x, i1)
# Get integer subrange
ilo=ceil(i1).astype(int)
ihi=floor(i2).astype(int)
# Get core vector for the integer subrange
if ilo<=ihi:
coreVector=x[ilo:(ihi+1)]
else:
coreVector=[]
# Construct result
if i1!=ilo:
retval=XatI(x, i1)
else:
retval=[]
retval=hstack((retval, coreVector))
if i2!=ihi:
retval=hstack((retval, XatI(x, i2)))
# Done
return retval
#------------------------------------------------------------------------------
# Calculus
# Derivative wrt integer index
def dYdI(y):
"""
Returns the derivative of 1-dimensional vector *y* with respect to its
index.
Uses 2nd order polynomial interpolation before the actual derivative is
calculated.
"""
# Interpolating polynomial of 2nd order
# indices used for
# 0, 1, 2 0 and 1
# 1, 2, 3 2
# 2, 3, 4 3
# ... ...
# n-4, n-3, n-2 n-3
# n-3, n-2, n-1 n-2 and n-1
# y=a x^2 + b x + c
# dy/dx = 2 a x + b
# There are n-2 interpolating polynomials, get their coefficients
yminus=array(y[:-2])
y0=array(y[1:-1])
yplus=array(y[2:])
c=y0
b=(yplus-yminus)/2.0
a=(yplus+yminus-2.0*y0)/2.0
# Now differentiate polynomial
c=b
b=2.0*a
a=0
# Generate edge points
# For i=0 (effective index x=-1)
dylo=-b[0]+c[0]
# For i=n-1 (effective index x=1)
dyhi=b[-1]+c[-1]
# For everything else (effective index x=0)
coreVector=c
# Done
return hstack((dylo, coreVector, dyhi))
def dYdX(y, x):
"""
Derivative of a 1-dimensional vector *y* with respect to the 1-dimensional
vector *x*. The arrays representing *y* and *x* must be of the same size.
"""
return dYdI(y)/dYdI(x)
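# Editor's sketch (hedged, not part of the original module): because dYdI()/dYdX()
# fit 2nd-order polynomials, the derivative of a quadratic on a uniform scale is
# reproduced exactly.
def _example_dydx():
    import numpy as np
    t=np.linspace(0.0, 1.0, 11)
    assert np.allclose(dYdX(t**2, t), 2.0*t)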
# Integrate vector wrt scale
# Returns an array of values. Each value is an integral from the beginning to belonging x component.
def integYdX(y, x):
"""
Integral of a 1-dimensional vector *y* with respect to its scale given by a
1-dimensional vector *x*. The arrays representing *y* and *x* must be of
the same size.
Uses 2nd order polynomial interpolation before the actual integral is
calculated.
The lower limit for integration is ``x[0]`` while the points in *x* define
the upper limits. This means that the first point of the result (the one
corresponding to ``x[0]``) is 0.
"""
# Interpolating polynomial of 2nd order
# indices used for
# 0, 1, 2 0 and 1
# 1, 2, 3 2
# 2, 3, 4 3
# ... ...
# n-4, n-3, n-2 n-3
# n-3, n-2, n-1 n-2 and n-1
# y=a x^2 + b x + c
# dy/dx = 2 a x + b
# There are n-2 interpolating polynomials, get their coefficients
hminus=array(x[:-2]-x[1:-1])
hplus=array(x[2:]-x[1:-1])
yminus=array(y[:-2])
y0=array(y[1:-1])
yplus=array(y[2:])
c=y0
a=(yminus*hplus-yplus*hminus-c*(hplus-hminus))/(hplus*hminus*(hminus-hplus))
b=(yminus-c-a*hminus*hminus)/hminus
# Integrate polynomial (resulting in a x^3 + b x^2 + c x + C)
# Constant C is ignored.
a=a/3.0
b=b/2.0
# Calculate integral for last interval based on last interpolation
# (corresponding to 0..hplus)
ydxLast=a[-1]*(hplus[-1]**3)+b[-1]*(hplus[-1]**2)+c[-1]*hplus[-1]
# Calculate integral for first interval based on first interpolation
# (corresponding to hminus..0)
ydxFirst=-(a[0]*(hminus[0]**3)+b[0]*(hminus[0]**2)+c[0]*hminus[0])
# Calculate core integral - leading part
# values of integral for i..i+1 (corresponding to hminus..0)
coreVectorLeading=-(a*(hminus**3)+b*(hminus**2)+c*hminus)
# Calculate core integral - trailing part
# values of integral for i..i+1 (corresponding to 0..hplus)
coreVectorTrailing=a*(hplus**3)+b*(hplus**2)+c*hplus
# With zero, leading core vector, and ydxLast do a cumulative sum
integLeading=hstack((array(0.0), coreVectorLeading, ydxLast)).cumsum()
# With zero, ydxFirst, and trailing core vector do a cumulative sum
integTrailing=hstack((array(0.0), ydxFirst, coreVectorTrailing)).cumsum()
# Done
return (integLeading+integTrailing)/2.0
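# Editor's sketch (hedged, not part of the original module): integYdX() returns the
# running integral with a zero first sample; for y=2*t it reproduces t**2 exactly.
def _example_integydx():
    import numpy as np
    t=np.linspace(0.0, 2.0, 21)
    assert np.allclose(integYdX(2.0*t, t), t**2)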
#------------------------------------------------------------------------------
# DC measurements
def DCgain(output, input):
"""
Returns the maximal gain (slope) of a nonlinear transfer function
*output*(*input*).
*output* and *input* are 1-dimensional arrays of the same size.
"""
# Get gain
A=dYdX(output, input)
# Return maximum
return A.max()
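# Editor's sketch (hedged, not part of the original module): for the DC transfer
# curve out=tanh(10*in) the maximal slope is 10 at in=0, which DCgain() recovers
# to well within a percent on a reasonably dense sweep.
def _example_dcgain():
    import numpy as np
    vin=np.linspace(-1.0, 1.0, 2001)
    assert abs(DCgain(np.tanh(10.0*vin), vin)/10.0-1.0)<0.01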
def DCswingAtGain(output, input, relLevel, type='out'):
"""
Returns the *input* or *output* interval corresponding to the range where
the gain (slope) of *output(input)* is above *relLevel* times maximal
slope. Only *relLevel* < 1 makes sense in this measurement.
*type* specifies what to return
* ``out`` - return the *output* values interval
* ``in`` - return the *input* values interval
*relLevel* must satisfy 0 <= *relLevel* <= 1.
"""
# Check
if (relLevel<=0) or (relLevel>=1):
raise Exception, "Bad relative level."
# Get gain (absolute)
A=abs(dYdX(output, input))
# Get maximum and level
Amax=A.max()
Alev=Amax*relLevel
# Find maximum
maxI=IatXval(A, Amax)
# Find crossings
crossI=IatXval(A, Alev)
if crossI.size<=0:
raise Exception, "No crossings with specified level found."
# Extract crossings to left and to right
Ileft=filterI(crossI, 'left', maxI.min())
Iright=filterI(crossI, 'right', maxI.max())
if Ileft.size<=0:
raise Exception, "No crossing to the left from the maximum found."
if Iright.size<=0:
raise Exception, "No crossing to the right from the maximum found."
# max(), min() will raise an exception if no crossing is found
i1=Ileft.max()
i2=Iright.min()
# Get corresponding range
if type=='out':
vec=output
elif type=='in':
vec=input
else:
raise Exception, "Bad output type."
return abs(XatI(vec, i2)-XatI(vec, i1))
#------------------------------------------------------------------------------
# AC measurements
def ACcircle(unit='deg'):
"""
Returns the full circle in units specified by *unit*
* ``deg`` - return 360
* ``rad`` - return 2*``pi``
"""
if unit=='deg':
return 360
elif unit=='rad':
return 2*pi
else:
raise Exception, "Bad angle unit."
def ACtf(output, input):
"""
Return the transfer function *output/input* where *output* and *input* are
complex vectors of the same size representing the systems response at
various frequencies.
"""
return array(output)/array(input)
def ACmag(tf, unit='db'):
"""
Return the magnitude in desired *unit* of a small signal transfer function
*tf*.
* ``db`` and ``db20`` stand for voltage/current gain decibels where
20dB = gain factor of 10.0
* ``db10`` stands for power gain decibels where
10dB = gain factor of 10.0
* ``abs`` stands for gain factor
"""
mag=abs(array(tf))
if (unit=='db') or (unit=='db20'):
return 20*log10(mag)
elif unit=='db10':
return 10*log10(mag)
elif unit=='abs':
return mag
else:
raise Exception, "Bad magnitude unit."
def ACphase(tf, unit='deg', unwrapTol=0.5):
"""
Return the phase in desired *unit* of a transfer function *tf*
* ``deg`` stands for degrees
* ``rad`` stands for radians
The phase is unwrapped (discontinuities are stitched together to make it
continuous). The tolerance of the unwrapping (in radians) is
*unwrapTol* times ``pi``.
"""
# Get argument
ph=angle(tf)
# Unwrap if requested
if (unwrapTol>0) and (unwrapTol<1):
ph=unwrap(ph, unwrapTol*pi)
# Convert to requested unit
if unit=='deg':
return ph/pi*180.0
elif unit=='rad':
return ph
else:
raise Exception, "Bad phase unit."
def ACgain(tf, unit='db'):
"""
Returns the maximal gain magnitude of a transfer function in units given
by *unit*
* ``db`` and ``db20`` stand for voltage/current gain decibels where
20dB = gain factor of 10.0
* ``db10`` stands for power gain decibels where
10dB = gain factor of 10.0
* ``abs`` stands for gain factor
"""
mag=ACmag(tf, unit)
return mag.max()
def ACbandwidth(tf, scl, filter='lp', levelType='db', level=-3.0):
"""
Return the bandwidth of a transfer function *tf* on frequency scale *scl*.
*tf* and *scl* must be 1-dimensional arrays of the same size.
The type of the transfer function is given by *filter* where
* ``lp`` stands for low-pass (return frequency at *level*)
* ``hp`` stands for high-pass (return frequency at *level*)
* ``bp`` stands for band-pass (return bandwidth at *level*)
*levelType* gives the units for the *level* argument. Allowed values for
*levelType* are
* ``db`` and ``db20`` stand for voltage/current gain decibels where
20dB = gain factor of 10.0
* ``db10`` stands for power gain decibels where
10dB = gain factor of 10.0
* ``abs`` stands for gain factor
*level* specifies the level at which the bandwidth should be measured. For
``db``, ``db10``, and ``db20`` *levelType* the level is relative to the
maximal gain and is added to the maximal gain. For ``abs`` *levelType* the
level is a factor with which the maximal gain factor must be multiplied to
obtain the gain factor level at which the bandwidth should be measured.
"""
# Magnitude
mag=ACmag(tf, levelType)
# Reference level
ref=mag.max()
# Crossing level
if levelType=='abs':
cross=ref*level
else:
cross=ref+level
# Find crossings
crossI=IatXval(mag, cross)
# Find reference position
crossMaxI=IatXval(mag, ref).min()
if crossI.size<=0:
raise Exception, "No crossings with specified level found."
# Make scale real
scl=abs(scl)
# Handle filter type
if filter=='lp':
# Get first crossing to the right of the reference position
# min() will raise an exception if no crossing is found
bwI=filterI(crossI, 'right', crossMaxI)
if bwI.size<=0:
raise Exception, "No crossing to the right from the maximum found."
bwI=bwI.min()
bw=XatI(scl, bwI)
elif filter=='hp':
# Get first crossing to the left of the reference position
# max() will raise an exception if no crossing is found
bwI=filterI(crossI, 'left', crossMaxI).max()
if bwI.size<=0:
raise Exception, "No crossing to the left from the maximum found."
bwI=bwI.max()
bw=XatI(scl, bwI)
elif filter=='bp':
# Get first crossing to the left and the right of the reference position
# max(), min() will raise an exception if no crossing is found
bwI1=filterI(crossI, 'left', crossMaxI).max()
bwI2=filterI(crossI, 'right', crossMaxI).min()
if bwI1.size<=0:
raise Exception, "No crossing to the left from the maximum found."
if bwI2.size<=0:
raise Exception, "No crossing to the right from the maximum found."
bwI1=bwI1.max()
bwI2=bwI2.min()
bw=XatI(scl, bwI2)-XatI(scl, bwI1)
else:
raise Exception, "Bad filter type."
return bw
def ACugbw(tf, scl):
"""
Returns the unity-gain bandwidth of a transfer function *tf* on frequency
scale *scl*. 1-dimensional arrays *tf* and *scl* must be of the same size.
The return value is the frequency at which the transfer function
reaches 1.0 (0dB).
"""
# Magnitude
mag=ACmag(tf, 'db')
# Make scale real
scl=abs(scl)
# Find 0dB magnitude
# min() will raise an exception if no crossing is found
crossI=IatXval(mag, 0.0)
if crossI.size<=0:
raise Exception, "No crossing with 0dB level found."
crossI=crossI.min()
# Calculate ugbw
ugbw=XatI(scl, crossI)
return ugbw
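# Editor's sketch (hedged, not part of the original module): for a single-pole
# amplifier A0/(1+j*f/fp) with A0>>1 the unity-gain bandwidth is approximately
# A0*fp, which ACugbw() recovers on a log-spaced frequency sweep.
def _example_acugbw():
    import numpy as np
    f=np.logspace(0, 7, 1000)
    A0, fp = 1000.0, 1.0e3
    tf=A0/(1.0+1j*f/fp)
    assert abs(ACugbw(tf, f)/(A0*fp)-1.0)<0.05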
def ACphaseMargin(tf, unit='deg', unwrapTol=0.5):
"""
Returns the phase margin of a transfer function given by 1-dimensional
array *tf*. Uses *unwrapTol* as the unwrap tolerance for phase
(see :func:`ACphase`). The phase margin is returned in units given by
*unit* where
* ``deg`` stands for degrees
* ``rad`` stands for radians
The phase margin (in degrees) is the amount the phase at the point where
the transfer function magnitude reaches 0dB should be decreased to become
equal to -180.
For stable systems the phase margin is >0.
"""
# Magnitude
mag=ACmag(tf, 'db')
# Phase
ph=ACphase(tf, unit, unwrapTol)
# Find 0dB magnitude
crossI=IatXval(mag, 0.0)
if crossI.size<=0:
raise Exception, "No crossing with 0dB level found."
crossI=crossI.min()
# Calculate phase at 0dB
ph0=XatI(ph, crossI)
# Return phase margin
pm=ph0+ACcircle(unit)/2
return pm
# Gain margin of a tf
def ACgainMargin(tf, unit='db', unwrapTol=0.5):
"""
Returns the gain margin of a transfer function given by 1-dimensional array
*tf*. Uses *unwrapTol* as the unwrap tolerance for phase
(see :func:`ACphase`). The gain margin is returned in units given by *unit*
where
* ``db`` and ``db20`` stand for voltage/current gain decibels where
20dB = gain factor of 10.0
* ``db10`` stands for power gain decibels where
10dB = gain factor of 10.0
* ``abs`` stands for gain factor
The gain margin (in voltage/current gain decibels) is the amount the gain
at the point where phase reaches -180 degrees should be increased to become
equal to 0.
For stable systems the gain margin is >0.
"""
# Magnitude
mag=ACmag(tf, 'abs')
# Phase
ph=ACphase(tf, 'deg', unwrapTol)
# Find -180deg in phase
crossI=IatXval(ph, -180.0)
if crossI.size<=0:
raise Exception, "No crossing with -180 degrees level found."
crossI=crossI.min()
# Get gain at -180 degrees
mag180=XatI(mag, crossI)
# Gain margin in absolute units
gm=1.0/mag180
# Return selected units
return ACmag(gm, unit)
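# Editor's sketch (hedged, not part of the original module): a two-pole open-loop
# gain with well separated poles gives a positive phase margin below 90 degrees;
# the exact value is not asserted here.
def _example_phase_margin():
    import numpy as np
    f=np.logspace(0, 8, 2000)
    tf=1.0e4/((1.0+1j*f/1.0e3)*(1.0+1j*f/1.0e6))
    pm=ACphaseMargin(tf, 'deg')
    assert 0.0<pm<90.0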
#------------------------------------------------------------------------------
# Transient measurements
def _refLevel(sig, scl, t1=None, t2=None):
"""
In signal *sig* with scale *scl* looks up the points where scale is equal
to *t1* and *t2*. The default values of *t1* and *t2* are the first and the
last value in *scl*.
Returns a tuple (i1, s1, i2, s2) where i1 and i2 represent the fractional
indices of the two points in signal (or scale) while s1 and s2 represent
the values of the signal at those two points.
"""
# Get interesting part in terms of indices
if t1 is None:
i1=0
else:
i1=IatXval(scl, t1, 'rising')
if i1.size<=0:
raise Exception, "Start point not found."
i1=i1[0]
if t2 is None:
i2=scl.size-1
else:
i2=IatXval(scl, t2, 'rising')
if i2.size<=0:
raise Exception, "End point not found."
i2=i2[0]
if i1>=i2:
raise Exception, "Start point after end point."
# Get reference levels
s1=XatI(sig, i1)
s2=XatI(sig, i2)
return (i1, s1, i2, s2)
def Tdelay(sig1, sig2, scl,
lev1type='rel', lev1=0.5, edge1='any', skip1=0,
lev2type='rel', lev2=0.5, edge2='any', skip2=0,
t1=None, t2=None):
"""
Calculates the delay of signal *sig2* with respect to signal *sig1*. Both
signals share a common scale *scl*. The delay is the difference in scale
between the point where *sig1* reaches level *lev1* and the point where
*sig2* reaches level *lev2*. *edge2* defines the
type of crossing between *sig2* and *lev2*
* ``rising`` - the slope of *sig2* is positive or zero at the crossing
* ``falling`` - the slope of *sig2* is negative or zero at the crossing
* ``any`` - the slope of *sig2* does not matter
*skip2* specifies how many crossings since the beginning of *sig2* are
skipped before the crossing that is used as the point in *sig2* is reached.
0 means that the first crossing is used as the point in *sig2*.
Similarly the point in *sig1* is defined with *lev1*, *edge1*, and *skip1*.
*t1* and *t2* are the points on the scale defining the beginning and the
end of the part of *sig1* and *sig2* which is used in the calculation of
the delay. *skip1* and *skip2* are counted from point *t1* on the scale.
The default values of *t1* and *t2* are the first and the last value in
*scl*.
If *lev1type* is ``abs`` *lev1* specifies the value of the signal at the
crossing. If *lev1type* is ``rel`` *lev1* specifies the relative value of
the signal (between 0.0 and 1.0) where the 0.0 level is defined as the
*sig1* level at point *t1* on the scale while the 1.0 level is defined as
the *sig1* level at point *t2* on the scale. If *t1* and *t2* are not given
the 0.0 and 1.0 relative levels are taken at the beginning and the end of
*sig2*.
Similarly *lev2type* defines the meaning of *lev2* with respect to *sig2*,
*t1*, and *t2*.
"""
# Get reference levels of sig1
(i1, s11, i2, s12)=_refLevel(sig1, scl, t1, t2)
# Get reference levels of sig2
s21=XatI(sig2, i1)
s22=XatI(sig2, i2)
# Extract interesting part
partsig1=XatIrange(sig1, i1, i2)
partsig2=XatIrange(sig2, i1, i2)
partscl=XatIrange(scl, i1, i2)
# Get level crossing for signal 1
if lev1type=='abs':
crossI1=IatXval(partsig1, lev1, edge1)
elif lev1type=='rel':
crossI1=IatXval(partsig1, s11+(s12-s11)*lev1, edge1)
else:
raise Exception, "Bad level type for first signal."
if skip1>=crossI1.size:
raise Exception, "No such crossing for first signal."
# Get level crossing for signal 2
if lev2type=='abs':
crossI2=IatXval(partsig2, lev2, edge2)
elif lev2type=='rel':
crossI2=IatXval(partsig2, s21+(s22-s21)*lev2, edge2)
else:
raise Exception, "Bad level type for first signal."
if skip2>=crossI2.size:
raise Exception, "No such crossing for second signal."
crossI1=crossI1[skip1]
crossI2=crossI2[skip2]
delay=XatI(partscl, crossI2)-XatI(partscl, crossI1)
return delay
def Tshoot(measureType, sig, scl,
t1=None, t2=None, outputType='rel'):
"""
Gets the overshoot or the undershoot of signal *sig* with scale *scl*. The
over/undershoot is measured on the scale interval between *t1* and *t2*. If
*t1* and *t2* are not given the whole signal *sig1* is used in the
measurement.
The 0.0 and 1.0 relative levels in the signal are defined as the values of
*sig* at points *t1* and *t2* on the scale. The default values of *t1* and
*t2* are the first and the last value in *scl*.
Overshoot is the amount the signal rises above the 1.0 relative level on
the observed scale interval defined by *t1* and *t2*. Undershoot is the
amount the signal falls below the 0.0 relative level on the observed scale
interval.
If *measureType* is set to ``over``, overshoot is measured and the function
expects the signal level at *t1* to be lower than the signal level at *t2*.
If *measureType* is ``under`` the opposite must hold.
Over/undershoot can be measured as relative (when *outputType* is ``rel``)
or absolute (when *outputType* is ``abs``). Absolute values reflect actual
signal values while relative values are measured with respect to the 0.0
and 1.0 relative signal level.
"""
# Get reference levels
(i1, s1, i2, s2)=_refLevel(sig, scl, t1, t2)
# Extract interesting part
partsig=XatIrange(sig, i1, i2)
partscl=XatIrange(scl, i1, i2)
if measureType=='over':
# Overshoot
if s1<=s2:
delta=partsig.max()-s2
else:
delta=0
elif measureType=='under':
# Undershoot
if s1>=s2:
delta=s2-partsig.min()
else:
delta=0
else:
raise Exception, "Bad measurement type."
if outputType=='abs':
return delta
elif outputType=='rel':
span=abs(s2-s1)
if span==0.0:
raise Exception, "Can't get relative value on flat signal."
return delta/span
else:
raise Exception, "Bad output type."
def Tovershoot(sig, scl,
t1=None, t2=None, outputType='rel'):
"""
An alias for :func:`Tshoot` with *measureType* set to ``over``.
"""
return Tshoot('over', sig, scl, t1, t2, outputType);
def Tundershoot(sig, scl,
t1=None, t2=None, outputType='rel'):
"""
An alias for :func:`Tshoot` with *measureType* set to ``under``.
"""
return Tshoot('under', sig, scl, t1, t2, outputType);
def TedgeTime(edgeType, sig, scl,
lev1type='rel', lev1=0.1,
lev2type='rel', lev2=0.9,
t1=None, t2=None):
"""
Measures rise or fall time (scale interval) of signal *sig* on scale *scl*.
The value of the *edgeType* parameter determines the type of the
measurement
* ``rising`` - measures rise time
* ``falling`` - measures fall time
*t1* and *t2* specify the scale interval on which the measurement takes
place. Their default values correspond to the first and the last value in
*scl*. The values of the signal at *t1* and *t2* define the 0.0 and the 1.0
relative signal value.
*lev1type* and *lev1* specify the point at which the signal rise/fall
begins. If *lev1type* is ``abs`` the level specified by *lev1* is the
actual signal value. If *lev1type* is ``rel`` the value given by *lev1* is
a relative signal value.
Similarly *lev2type* and *lev2* apply to the point at which the signal
rise/fall ends.
*lev1type*, *lev1*, *lev2type*, and *lev2* are by default set to measure
the 10%..90% rise/fall time.
"""
# Get reference levels
(i1, s1, i2, s2)=_refLevel(sig, scl, t1, t2)
# Extract interesting part
partsig=XatIrange(sig, i1, i2)
partscl=XatIrange(scl, i1, i2)
# Get crossing levels
if lev1type=='abs':
sc1=lev1
elif lev1type=='rel':
sc1=s1+(s2-s1)*lev1
else:
raise Exception, "Bad level type for first point."
if lev2type=='abs':
sc2=lev2
elif lev2type=='rel':
sc2=s1+(s2-s1)*lev2
else:
raise Exception, "Bad level type for second point."
# Get level crossings
crossI1=IatXval(partsig, sc1, edgeType)
if crossI1.size<=0:
raise Exception, "First point not found."
# Use first crossing
crossI1=crossI1.min()
crossI2=IatXval(partsig, sc2, edgeType)
# Expect second point after first point
crossI2=filterI(crossI2, 'right', crossI1, includeStart=True)
if crossI2.size<=0:
raise Exception, "Second point not found."
# Use first crossing that remains unfiltered
crossI2=crossI2.min()
# Get crossing times
delta=XatI(partscl, crossI2)-XatI(partscl, crossI1)
return delta
def TriseTime(sig, scl,
lev1type='rel', lev1=0.1,
lev2type='rel', lev2=0.9,
t1=None, t2=None):
"""
An alias for :func:`TedgeTime` with *edgeType* set to ``rising``.
"""
return TedgeTime('rising', sig, scl, lev1type, lev1, lev2type, lev2, t1, t2)
def TfallTime(sig, scl,
lev1type='rel', lev1=0.1,
lev2type='rel', lev2=0.9,
t1=None, t2=None):
"""
An alias for :func:`TedgeTime` with *edgeType* set to ``falling``.
"""
return TedgeTime('falling', sig, scl, lev1type, lev1, lev2type, lev2, t1, t2)
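# Editor's sketch (hedged, not part of the original module): on an ideal 0-to-1
# ramp lasting one second the default 10%..90% rise time reported by TriseTime()
# is 0.8 seconds.
def _example_rise_time():
    import numpy as np
    t=np.linspace(0.0, 1.0, 1001)
    assert abs(TriseTime(t, t)-0.8)<1e-6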
def TslewRate(edgeType, sig, scl,
lev1type='rel', lev1=0.1,
lev2type='rel', lev2=0.9,
t1=None, t2=None):
"""
Measures the slew rate of a signal. The slew rate is defined as the
quotient dx/dt where dx denotes the signal difference between the beginning
and the end of signal's rise/fall, while dt denotes the rise/fall time.
Slew rate is always positive.
See :func:`TedgeTime` for the explanation of the function's parameters.
"""
# Get reference levels
(i1, s1, i2, s2)=_refLevel(sig, scl, t1, t2)
if lev1type=='abs':
sl1=lev1
else:
sl1=s1+(s2-s1)*lev1
if lev2type=='abs':
sl2=lev2
else:
sl2=s1+(s2-s1)*lev2
sigDelta=sl2-sl1
# Get edge time
dt=TedgeTime(edgeType, sig, scl, lev1type, lev1, lev2type, lev2, t1, t2)
if dt==0:
raise Exception, "Can't evaluate slew rate if edge time is zero."
# Get slew rate
return abs(sigDelta)/dt
def TsettlingTime(sig, scl,
tolType='rel', tol=0.05,
t1=None, t2=None):
"""
Measures the time (scale interval on scale *scl*) in which signal *sig*
settles within some prescribed tolerance of its final value.
*t1* and *t2* define the scale interval within which the settling time is
measured. The default values of *t1* and *t2* are the first and the last
value of *scl*. The final signal value is the value of the signal
corresponding to point *t2* on the scale.
The 0.0 and the 1.0 relative signal levels are defined as signal levels at
points *t1* and *t2* on the scale.
If *tolType* is ``abs`` the settling time is measured from *t1* to the
point at which the signal remains within *tol* of its final value at *t2*.
If *tolType* is ``rel`` the settling tolerance is defined as *tol* times
the difference between the signal levels corresponding to the 0.0 and 1.0
relative signal level.
"""
# Get reference levels
(i1, s1, i2, s2)=_refLevel(sig, scl, t1, t2)
sigDelta=s2-s1
# Extract interesting part
partsig=XatIrange(sig, i1, i2)
partscl=XatIrange(scl, i1, i2)
# Get tolerance level
if tolType=='abs':
tolInt=tol
elif tolType=='rel':
tolInt=tol*abs(sigDelta)
else:
raise Exception, "Bad tolerance type."
# Get absolute deviation from settled value
sigdev=abs(partsig-s2)
# Find crossing of absolute deviation with tolerance level
crossI=IatXval(sigdev, tolInt, 'any')
if crossI.size<=0:
raise Exception, "No crossing with tolerance level found."
# Take last crossing
cross=crossI[-1]
# Get settling time
delta=XatI(partscl, cross)-partscl[0]
if delta<0:
raise Exception, "This is weird. Negative settling time."
return delta
#------------------------------------------------------------------------------
# Overdrive calculation (generic)
# e.g. Vgs-Vth
class Poverdrive:
"""
Calculates the difference between the values obtained from two driver
functions.
Objects of this class are callable. The calling convention is
``object(name)``. When called it returns the difference between the values
returned by a call to *driver1* with arguments (name, p1) and the value
returned by a call to *driver2* with arguments (name, p2). The difference
is returned as an array. If the size of the array is 1, it is returned as a
scalar (0-dimensional array).
:class:`Poverdrive` can be used for calculating the Vgs-Vth difference of
one or more MOS transistors by defining the measurement script in the
following way::
obj=m.Poverdrive(p, 'vgs', p, 'vth')
retval=map(obj, ['mn2', 'mn3', 'mn9'])
__result=np.array(retval)
The :func:`map` Python builtin function calls the :class:`Poverdrive`
object ``obj`` 3 times, once for every member of the list
``['mn2', 'mn3', 'mn9']`` and collects the return values in
a list which is then returned by ``map`` and stored in ``retval``.
A call to :class:`Poverdrive` object ``obj`` with argument ``mn2`` returns
the result of::
p('mn2', 'vgs')-p('mn2', 'vth')
which is actually the difference between the Vgs and the threshold voltage
of MOS transistor ``mn2``. So ``retval`` is a list holding the values of
the difference between Vgs and the threshold voltage of transistors listed
in ``['mn2', 'mn3', 'mn9']``.
Finally the list is converted to an array because the
:class:`~pyopus.evaluator.performance.PerformanceEvaluator` object can't
handle lists.
The previous measurement script could also be written as a measurement
expression::
np.array(map(m.Poverdrive(p, 'vgs', p, 'vth'), ['mn2', 'mn3', 'mn9']))
Note that during measurement evaluation
(when a :class:`~pyopus.evaluator.performance.PerformanceEvaluator` object
is called) the function :func:`p` accesses device properties calculated by
the simulator while the :mod:`pyopus.evaluator.measure` and :mod:`numpy`
modules are available as :mod:`m` and :mod:`np`.
"""
def __init__(self, driver1, p1, driver2, p2):
self.driver1=driver1
self.p1=p1
self.driver2=driver2
self.p2=p2
def __call__(self, instance):
if type(self.p1) is str:
v1=self.driver1(instance, self.p1)
else:
v1=self.driver1(instance, *self.p1)
if type(self.p2) is str:
v2=self.driver2(instance, self.p2)
else:
v2=self.driver2(instance, *self.p2)
diff=array(v1-v2)
# Scalarize if diff is 1 long
if diff.size==1:
diff=diff[0]
return diff
| gpl-3.0 |
JeanKossaifi/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| bsd-3-clause |
ahoyosid/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
materialsvirtuallab/veidt | veidt/tests/test_abstract.py | 2 | 1839 | import unittest
import numpy as np
from veidt.abstract import Describer, Model
import os
import pandas as pd
from sklearn.linear_model import LinearRegression
file_path = os.path.dirname(__file__)
class DummyDescriber(Describer):
def describe(self, obj):
return pd.Series(np.sum(obj))
class DummyModel(Model):
def __init__(self):
self.model = LinearRegression()
def fit(self, x, y):
self.model.fit(x, y)
return self
def predict(self, x):
return self.model.predict(x)
class TestDescrber(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dd = DummyDescriber()
def test_fit(self):
dd2 = self.dd.fit([1, 2, 3])
self.assertEqual(dd2, self.dd)
def test_describe(self):
result = self.dd.describe([1, 2, 3])
self.assertEqual(result.values[0], 6)
def test_describe_all(self):
results = self.dd.describe_all([[1, 1, 1], [2, 2, 2]])
self.assertListEqual(list(results.shape), [2])
results_transform = self.dd.transform([[1, 1, 1], [2, 2, 2]])
self.assertEqual(9, np.sum(results_transform))
class TestModel(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model = DummyModel()
def test_fit(self):
model_dummy = self.model.fit([[1, 2], [3, 4]], [[3], [7]])
self.assertEqual(model_dummy, self.model)
def test_predict(self):
self.model.fit([[1, 2], [3, 4]], [[3], [7]])
result = self.model.predict([[1, 5]])
self.assertEqual(result[0], 6)
def test_evaluate(self):
self.model.fit([[1, 2], [3, 4]], [[3], [7]])
error = self.model.evaluate([[1, 2], [3, 4]], [[4, 8]])
# print(error)
self.assertAlmostEqual(error['mae'][0], 1)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
mjudsp/Tsallis | sklearn/feature_selection/tests/test_chi2.py | 56 | 2400 | """
Tests for chi2, currently the only feature selection function designed
specifically to work with sparse matrices.
"""
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
import scipy.stats
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.feature_selection.univariate_selection import _chisquare
from nose.tools import assert_raises
from numpy.testing import assert_equal, assert_array_almost_equal
# Feature 0 is highly informative for class 1;
# feature 1 is the same everywhere;
# feature 2 is a bit informative for class 2.
X = [[2, 1, 2],
[9, 1, 1],
[6, 1, 2],
[0, 1, 2]]
y = [0, 1, 2, 2]
def mkchi2(k):
"""Make k-best chi2 selector"""
return SelectKBest(chi2, k=k)
def test_chi2():
# Test Chi2 feature extraction
chi2 = mkchi2(k=1).fit(X, y)
chi2 = mkchi2(k=1).fit(X, y)
assert_equal(chi2.get_support(indices=True), [0])
assert_equal(chi2.transform(X), np.array(X)[:, [0]])
chi2 = mkchi2(k=2).fit(X, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xsp = csr_matrix(X, dtype=np.float64)
chi2 = mkchi2(k=2).fit(Xsp, y)
assert_equal(sorted(chi2.get_support(indices=True)), [0, 2])
Xtrans = chi2.transform(Xsp)
assert_equal(Xtrans.shape, [Xsp.shape[0], 2])
# == doesn't work on scipy.sparse matrices
Xtrans = Xtrans.toarray()
Xtrans2 = mkchi2(k=2).fit_transform(Xsp, y).toarray()
assert_equal(Xtrans, Xtrans2)
def test_chi2_coo():
# Check that chi2 works with a COO matrix
# (as returned by CountVectorizer, DictVectorizer)
Xcoo = coo_matrix(X)
mkchi2(k=2).fit_transform(Xcoo, y)
# if we got here without an exception, we're safe
def test_chi2_negative():
# Check for proper error on negative numbers in the input X.
X, y = [[0, 1], [-1e-20, 1]], [0, 1]
for X in (X, np.array(X), csr_matrix(X)):
assert_raises(ValueError, chi2, X, y)
def test_chisquare():
# Test replacement for scipy.stats.chisquare against the original.
obs = np.array([[2., 2.],
[1., 1.]])
exp = np.array([[1.5, 1.5],
[1.5, 1.5]])
# call SciPy first because our version overwrites obs
chi_scp, p_scp = scipy.stats.chisquare(obs, exp)
chi_our, p_our = _chisquare(obs, exp)
assert_array_almost_equal(chi_scp, chi_our)
assert_array_almost_equal(p_scp, p_our)
| bsd-3-clause |
stuliveshere/SeismicProcessing2015 | prac1_student_full/toolbox/toolbox.py | 3 | 9756 | import numpy as np
import matplotlib.pyplot as pylab
from matplotlib.widgets import Slider
pylab.rcParams['image.interpolation'] = 'sinc'
#==================================================
# decorators
#==================================================
def io(func):
'''
an io decorator that allows
input/output to be either a filename
(i.e. a string) or an array
'''
def wrapped(*args, **kwargs) :
if type(args[0]) == type(''):
workspace = read(args[0])
else:
workspace = args[0]
result = func(workspace, **kwargs)
if type(result) != type(None):
if type(args[1]) == type(''):
return write(result, args[1])
else:
return result
return wrapped
#==================================================
# display tools
#==================================================
class KeyHandler(object):
def __init__(self, fig, ax, dataset, kwargs):
self.fig = fig
self.ax = ax
self.kwargs = kwargs
self.dataset = dataset
self.start = 0
if kwargs['primary'] == None:
self.slice = self.dataset
else:
keys = np.unique(dataset[kwargs['primary']])
self.keys = keys[::kwargs['step']]
self.nkeys = self.keys.size
self.ensemble()
if 'clip' in kwargs and kwargs['clip'] != 0:
self.clip = kwargs['clip']
else:
self.clip = np.mean(np.abs(self.dataset['trace']))
print 'PySeis Seismic Viewer'
print 'type "h" for help'
self.draw()
def __call__(self, e):
print e.xdata, e.ydata
if e.key == "right":
self.start += 1
self.ensemble()
elif e.key == "left":
self.start -= 1
self.ensemble()
elif e.key == "up":
self.clip /= 1.1
print self.clip
elif e.key == "down":
self.clip *= 1.1
print self.clip
elif e.key == "h":
print "right arrow: next gather"
print "left arrow: last gather"
print "up arrow: hotter"
print "down arrow: colder"
print "clip=", self.clip
else:
return
self.draw()
def draw(self):
self.ax.cla()
self.im = self.ax.imshow(self.slice['trace'].T, aspect='auto', cmap='Greys', vmax =self.clip, vmin=-1*self.clip)
try:
self.ax.set_title('%s = %d' %(self.kwargs['primary'], self.keys[self.start]))
except AttributeError:
pass
self.fig.canvas.draw()
def ensemble(self):
try:
self.slice = self.dataset[self.dataset[self.kwargs['primary']] == self.keys[self.start]]
except IndexError:
self.start = 0
@io
def display(dataset, **kwargs):
'''
iterates through dataset using
left and right keys
parameters required:
primary key
seconary key
step size
'''
fig = pylab.figure()
ax = fig.add_subplot(111)
eventManager = KeyHandler(fig, ax, dataset, kwargs)
fig.canvas.mpl_connect('key_press_event',eventManager)
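# Editor's sketch (hedged, not part of the original toolbox): typical interactive
# use of display(); 'shots.su' and the 'fldr' primary key are placeholders for
# whatever SU file and header key you actually have, and an interactive
# matplotlib backend is assumed.
def _example_display(filename='shots.su'):
    display(filename, primary='fldr', step=1, clip=0)
    pylab.show()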
def scan(dataset):
print " %0-35s: %0-15s %s" %('key', 'min', 'max')
print "========================================="
for key in np.result_type(dataset).descr:
a = np.amin(dataset[key[0]])
b = np.amax(dataset[key[0]])
if (a != 0) and (b != 0):
print "%0-35s %0-15.3f %.3f" %(key, a, b)
print "========================================="
#~ def build_vels(times, velocities, ns=1000, dt=0.001):
#~ '''builds a full velocity trace from a list of vels and times'''
#~ tx = np.linspace(dt, dt*ns, ns)
#~ vels = np.interp(tx, times, velocities)
#~ vels = np.pad(vels, (100,100), 'reflect')
#~ vels = np.convolve(np.ones(100.0)/100.0, vels, mode='same')
#~ vels = vels[100:-100]
#~ return vels
@io
def cp(workspace, **params):
return workspace
@io
def agc(workspace, window=100, **params):
'''
automatic gain control
inputs:
window
'''
vec = np.ones(window, 'f')
func = np.apply_along_axis(lambda m: np.convolve(np.abs(m), vec, mode='same'), axis=-1, arr=workspace['trace'])
workspace['trace'] /= func
workspace['trace'][~np.isfinite(workspace['trace'])] = 0
workspace['trace'] /= np.amax(np.abs(workspace['trace']))
return workspace
def ricker(f, length=0.512, dt=0.001):
t = np.linspace(-length/2, (length-dt)/2, length/dt)
y = (1.0 - 2.0*(np.pi**2)*(f**2)*(t**2)) * np.exp(-(np.pi**2)*(f**2)*(t**2))
y = np.around(y, 10)
inds = np.nonzero(y)[0]
return y[np.amin(inds):np.amax(inds)]
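# Editor's sketch (hedged, not part of the original toolbox): build a 25 Hz Ricker
# wavelet and convolve it onto a spike dataset assembled with the typeSU dtype
# defined later in this module; assumes the NumPy version this practical targets.
def _example_ricker_conv(ns=512):
    wavelet = ricker(25.0, length=0.2, dt=0.001)
    data = np.zeros(10, dtype=typeSU(ns))
    data['ns'] = ns
    data['trace'][:, ns//2] = 1.0          # spike in the middle of every trace
    return conv(data, wavelet)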
def conv(workspace, wavelet):
workspace['trace'] = np.apply_along_axis(lambda m: np.convolve(m, wavelet, mode='same'), axis=-1, arr=workspace['trace'])
return workspace
@io
def fx(workspace, **params):
f = np.abs(np.fft.rfft(workspace['trace'], axis=-1))
correction = np.mean(np.abs(f), axis=-1).reshape(-1,1)
f /= correction
f = 20.0*np.log10(f)[:,::-1]
freq = np.fft.rfftfreq(params['ns'], params['dt'])
print params['ns'], params['dt']
hmin = np.amin(workspace['cdp'])
hmax = np.amax(workspace['cdp'])
vmin = np.amin(freq)
vmax = np.amax(freq)
extent=[hmin,hmax,vmin,vmax]
pylab.imshow(f.T, aspect='auto', extent=extent)
def db(data):
return 20.0*np.log10(data)
import numpy as np
su_header_dtype = np.dtype([
('tracl', np.int32),
('tracr', np.int32),
('fldr', np.int32),
('tracf', np.int32),
('ep', np.int32),
('cdp', np.int32),
('cdpt', np.int32),
('trid', np.int16),
('nvs', np.int16),
('nhs', np.int16),
('duse', np.int16),
('offset', np.int32),
('gelev', np.int32),
('selev', np.int32),
('sdepth', np.int32),
('gdel', np.int32),
('sdel', np.int32),
('swdep', np.int32),
('gwdep', np.int32),
('scalel', np.int16),
('scalco', np.int16),
('sx', np.int32),
('sy', np.int32),
('gx', np.int32),
('gy', np.int32),
('counit', np.int16),
('wevel', np.int16),
('swevel', np.int16),
('sut', np.int16),
('gut', np.int16),
('sstat', np.int16),
('gstat', np.int16),
('tstat', np.int16),
('laga', np.int16),
('lagb', np.int16),
('delrt', np.int16),
('muts', np.int16),
('mute', np.int16),
('ns', np.uint16),
('dt', np.uint16),
('gain', np.int16),
('igc', np.int16),
('igi', np.int16),
('corr', np.int16),
('sfs', np.int16),
('sfe', np.int16),
('slen', np.int16),
('styp', np.int16),
('stas', np.int16),
('stae', np.int16),
('tatyp', np.int16),
('afilf', np.int16),
('afils', np.int16),
('nofilf', np.int16),
('nofils', np.int16),
('lcf', np.int16),
('hcf', np.int16),
('lcs', np.int16),
('hcs', np.int16),
('year', np.int16),
('day', np.int16),
('hour', np.int16),
('minute', np.int16),
('sec', np.int16),
('timebas', np.int16),
('trwf', np.int16),
('grnors', np.int16),
('grnofr', np.int16),
('grnlof', np.int16),
('gaps', np.int16),
('otrav', np.int16), #179,180
('d1', np.float32), #181,184
('f1', np.float32), #185,188
('d2', np.float32), #189,192
('f2', np.float32), #193, 196
('ShotPoint', np.int32), #197,200
('unscale', np.int16), #201, 204
('TraceValueMeasurementUnit', np.int16),
('TransductionConstantMantissa', np.int32),
('TransductionConstantPower', np.int16),
('TransductionUnit', np.int16),
('TraceIdentifier', np.int16),
('ScalarTraceHeader', np.int16),
('SourceType', np.int16),
('SourceEnergyDirectionMantissa', np.int32),
('SourceEnergyDirectionExponent', np.int16),
('SourceMeasurementMantissa', np.int32),
('SourceMeasurementExponent', np.int16),
('SourceMeasurementUnit', np.int16),
('UnassignedInt1', np.int32),
('ns1', np.int32),
])
def typeSU(ns):
return np.dtype(su_header_dtype.descr + [('trace', ('<f4',ns))])
def readSUheader(filename):
raw = open(filename, 'rb').read()
return np.fromstring(raw, dtype=su_header_dtype, count=1)
def read(filename=None):
if filename == None:
raw= sys.stdin.read()
else:
raw = open(filename, 'rb').read()
return readData(raw)
def readData(raw):
su_header = np.fromstring(raw, dtype=su_header_dtype, count=1)
ns = su_header['ns'][0]
file_dtype = typeSU(ns)
data = np.fromstring(raw, dtype=file_dtype)
return data
def write(data, filename=None):
if filename == None:
data.tofile(sys.stdout)
else:
data.tofile(filename)
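# Editor's sketch (hedged, not part of the original toolbox): write a small
# synthetic SU dataset and read it back; 'example.su' is a placeholder filename,
# not a file shipped with this practical.
def _example_su_roundtrip(filename='example.su', ns=128):
    data = np.zeros(5, dtype=typeSU(ns))
    data['ns'] = ns
    data['dt'] = 1000                      # sample interval in microseconds
    write(data, filename)
    return read(filename)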
| mit |
rhyolight/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the cross correlation of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigenvectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the columns of a matrix left/right
flipud - flip the rows of a matrix up/down
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
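# A minimal usage sketch of the procedural interface documented above; it
# relies only on names re-exported here (numpy's linspace/sin/pi and
# pyplot's plot/xlabel/ylabel/legend/show) and assumes an interactive backend.
if __name__ == '__main__':
    t = linspace(0.0, 2.0, 200)
    plot(t, sin(2 * pi * t), label='sin(2*pi*t)')
    xlabel('time [s]')
    ylabel('amplitude')
    legend()
    show()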
| agpl-3.0 |
misdoro/python-ase | ase/dft/stm.py | 2 | 6272 | import pickle
import numpy as np
class STM:
def __init__(self, atoms, symmetries=None, use_density=False):
"""Scanning tunneling microscope.
atoms: Atoms object or filename
Atoms to scan or name of file to read LDOS from.
symmetries: list of int
List of integers 0, 1, and/or 2 indicating which surface
symmetries have been used to reduce the number of k-points
for the DFT calculation. The three integers correspond to
the following three symmetry operations::
[-1 0] [ 1 0] [ 0 1]
[ 0 1] [ 0 -1] [ 1 0]
use_density: bool
Use the electron density instead of the LDOS.
"""
self.use_density = use_density
if isinstance(atoms, str):
with open(atoms, 'rb') as f:
self.ldos, self.bias, self.cell = pickle.load(f)
self.atoms = None
else:
self.atoms = atoms
self.cell = atoms.cell
self.bias = None
self.ldos = None
assert not self.cell[2, :2].any() and not self.cell[:2, 2].any()
self.symmetries = symmetries or []
def calculate_ldos(self, bias):
"""Calculate local density of states for given bias."""
if self.ldos is not None and bias == self.bias:
return
self.bias = bias
calc = self.atoms.calc
if self.use_density:
self.ldos = calc.get_pseudo_density()
return
if bias < 0:
emin = bias
emax = 0.0
else:
emin = 0
emax = bias
nbands = calc.get_number_of_bands()
weights = calc.get_k_point_weights()
nkpts = len(weights)
nspins = calc.get_number_of_spins()
eigs = np.array([[calc.get_eigenvalues(k, s)
for k in range(nkpts)]
for s in range(nspins)])
eigs -= calc.get_fermi_level()
ldos = 0.0
for s in range(nspins):
for k in range(nkpts):
for n in range(nbands):
e = eigs[s, k, n]
if emin < e < emax:
psi = calc.get_pseudo_wave_function(n, k, s)
ldos += weights[k] * (psi * np.conj(psi)).real
if 0 in self.symmetries:
# (x,y) -> (-x,y)
ldos[1:] += ldos[:0:-1].copy()
ldos[1:] *= 0.5
if 1 in self.symmetries:
# (x,y) -> (x,-y)
ldos[:, 1:] += ldos[:, :0:-1].copy()
ldos[:, 1:] *= 0.5
if 2 in self.symmetries:
# (x,y) -> (y,x)
ldos += ldos.transpose((1, 0, 2)).copy()
ldos *= 0.5
self.ldos = ldos
def write(self, filename='stm.pckl'):
"""Write local density of states to pickle file."""
with open(filename, 'wb') as f:
pickle.dump((self.ldos, self.bias, self.cell), f,
protocol=pickle.HIGHEST_PROTOCOL)
def get_averaged_current(self, bias, z):
"""Calculate avarage current at height z.
Use this to get an idea of what current to use when scanning."""
self.calculate_ldos(bias)
nz = self.ldos.shape[2]
# Find grid point:
n = z / self.cell[2, 2] * nz
dn = n - np.floor(n)
n = int(n) % nz
# Average and do linear interpolation:
return ((1 - dn) * self.ldos[:, :, n].mean() +
dn * self.ldos[:, :, (n + 1) % nz].mean())
def scan(self, bias, current, z0=None, repeat=(1, 1)):
"""Constant current 2-d scan.
Returns three 2-d arrays (x, y, z) containing x-coordinates,
y-coordinates and heights. These three arrays can be passed to
matplotlib's contourf() function like this:
>>> import matplotlib.pyplot as plt
>>> plt.gca(aspect='equal')
>>> plt.contourf(x, y, z)
>>> plt.show()
"""
self.calculate_ldos(bias)
L = self.cell[2, 2]
nz = self.ldos.shape[2]
h = L / nz
ldos = self.ldos.reshape((-1, nz))
heights = np.empty(ldos.shape[0])
for i, a in enumerate(ldos):
heights[i] = find_height(a, current, h, z0)
s0 = heights.shape = self.ldos.shape[:2]
heights = np.tile(heights, repeat)
s = heights.shape
ij = np.indices(s, dtype=float).reshape((2, -1)).T
x, y = np.dot(ij / s0, self.cell[:2, :2]).T.reshape((2,) + s)
return x, y, heights
def linescan(self, bias, current, p1, p2, npoints=50, z0=None):
"""Constant current line scan.
Example::
stm = STM(...)
z = ... # tip position
c = stm.get_averaged_current(-1.0, z)
stm.linescan(-1.0, c, (1.2, 0.0), (1.2, 3.0))
"""
heights = self.scan(bias, current, z0)[2]
p1 = np.asarray(p1, float)
p2 = np.asarray(p2, float)
d = p2 - p1
s = np.dot(d, d)**0.5
cell = self.cell[:2, :2]
shape = np.array(heights.shape, float)
M = np.linalg.inv(cell)
line = np.empty(npoints)
for i in range(npoints):
p = p1 + i * d / (npoints - 1)
q = np.dot(p, M) * shape
line[i] = interpolate(q, heights)
return np.linspace(0, s, npoints), line
def interpolate(q, heights):
qi = q.astype(int)
f = q - qi
g = 1 - f
qi %= heights.shape
n0, m0 = qi
n1, m1 = (qi + 1) % heights.shape
z = (g[0] * g[1] * heights[n0, m0] +
f[0] * g[1] * heights[n1, m0] +
g[0] * f[1] * heights[n0, m1] +
f[0] * f[1] * heights[n1, m1])
return z
def find_height(ldos, current, h, z0=None):
if z0 is None:
n = len(ldos) - 2
else:
n = int(z0 / h)
while n >= 0:
if ldos[n] > current:
break
n -= 1
else:
return 0.0
c2, c1 = ldos[n:n + 2]
return (n + 1 - (current - c1) / (c2 - c1)) * h
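# A minimal usage sketch of the STM class above. The pickle file name and the
# tip height are hypothetical; the pickle must have been written earlier with
# STM.write(), and we reuse the bias it was computed for.
if __name__ == '__main__':
    stm = STM('stm.pckl')                # load a precomputed (ldos, bias, cell) pickle
    bias = stm.bias                      # reuse the stored bias
    current = stm.get_averaged_current(bias, z=8.0)
    x, y, heights = stm.scan(bias, current)
    distance, profile = stm.linescan(bias, current, (0.0, 0.0), (5.0, 5.0))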
| gpl-2.0 |
Statoil/libecl | python/ecl/grid/ecl_grid.py | 1 | 58619 | # Copyright (C) 2011 Equinor ASA, Norway.
#
# The file 'ecl_grid.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module to load and query ECLIPSE GRID/EGRID files.
The ecl_grid module contains functionality to load and query an
ECLIPSE grid file; it is currently not possible to manipulate or let
alone create a grid with the ecl_grid module. The functionality is
implemented in the EclGrid class. The ecl_grid module is a thin
wrapper around the ecl_grid.c implementation from the libecl library.
"""
import ctypes
import warnings
import numpy
import pandas
import sys
import os.path
import math
import itertools
from cwrap import CFILE, BaseCClass, load, open as copen
from ecl import EclPrototype
from ecl.util.util import monkey_the_camel
from ecl.util.util import IntVector
from ecl import EclDataType, EclUnitTypeEnum, EclTypeEnum
from ecl.eclfile import EclKW, FortIO
from ecl.grid import Cell
class EclGrid(BaseCClass):
"""
Class for loading and internalizing ECLIPSE GRID/EGRID files.
"""
TYPE_NAME = "ecl_grid"
_fread_alloc = EclPrototype("void* ecl_grid_load_case__(char*, bool)", bind = False)
_grdecl_create = EclPrototype("ecl_grid_obj ecl_grid_alloc_GRDECL_kw(int, int, int, ecl_kw, ecl_kw, ecl_kw, ecl_kw)", bind = False)
_alloc_rectangular = EclPrototype("ecl_grid_obj ecl_grid_alloc_rectangular(int, int, int, double, double, double, int*)", bind = False)
_exists = EclPrototype("bool ecl_grid_exists(char*)", bind = False)
_get_numbered_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_lgr_from_lgr_nr(ecl_grid, int)")
_get_named_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_lgr(ecl_grid, char*)")
_get_cell_lgr = EclPrototype("ecl_grid_ref ecl_grid_get_cell_lgr1(ecl_grid, int)")
_num_coarse_groups = EclPrototype("int ecl_grid_get_num_coarse_groups(ecl_grid)")
_in_coarse_group1 = EclPrototype("bool ecl_grid_cell_in_coarse_group1(ecl_grid, int)")
_free = EclPrototype("void ecl_grid_free(ecl_grid)")
_get_nx = EclPrototype("int ecl_grid_get_nx(ecl_grid)")
_get_ny = EclPrototype("int ecl_grid_get_ny(ecl_grid)")
_get_nz = EclPrototype("int ecl_grid_get_nz(ecl_grid)")
_get_global_size = EclPrototype("int ecl_grid_get_global_size(ecl_grid)")
_get_active = EclPrototype("int ecl_grid_get_active_size(ecl_grid)")
_get_active_fracture = EclPrototype("int ecl_grid_get_nactive_fracture(ecl_grid)")
_get_name = EclPrototype("char* ecl_grid_get_name(ecl_grid)")
_ijk_valid = EclPrototype("bool ecl_grid_ijk_valid(ecl_grid, int, int, int)")
_get_active_index3 = EclPrototype("int ecl_grid_get_active_index3(ecl_grid, int, int, int)")
_get_global_index3 = EclPrototype("int ecl_grid_get_global_index3(ecl_grid, int, int, int)")
_get_active_index1 = EclPrototype("int ecl_grid_get_active_index1(ecl_grid, int)")
_get_active_fracture_index1 = EclPrototype("int ecl_grid_get_active_fracture_index1(ecl_grid, int)")
_get_global_index1A = EclPrototype("int ecl_grid_get_global_index1A(ecl_grid, int)")
_get_global_index1F = EclPrototype("int ecl_grid_get_global_index1F(ecl_grid, int)")
_get_ijk1 = EclPrototype("void ecl_grid_get_ijk1(ecl_grid, int, int*, int*, int*)")
_get_ijk1A = EclPrototype("void ecl_grid_get_ijk1A(ecl_grid, int, int*, int*, int*)")
_get_xyz3 = EclPrototype("void ecl_grid_get_xyz3(ecl_grid, int, int, int, double*, double*, double*)")
_get_xyz1 = EclPrototype("void ecl_grid_get_xyz1(ecl_grid, int, double*, double*, double*)")
_get_cell_corner_xyz1 = EclPrototype("void ecl_grid_get_cell_corner_xyz1(ecl_grid, int, int, double*, double*, double*)")
_get_corner_xyz = EclPrototype("void ecl_grid_get_corner_xyz(ecl_grid, int, int, int, double*, double*, double*)")
_get_xyz1A = EclPrototype("void ecl_grid_get_xyz1A(ecl_grid, int, double*, double*, double*)")
_get_ij_xy = EclPrototype("bool ecl_grid_get_ij_from_xy(ecl_grid, double, double, int, int*, int*)")
_get_ijk_xyz = EclPrototype("int ecl_grid_get_global_index_from_xyz(ecl_grid, double, double, double, int)")
_cell_contains = EclPrototype("bool ecl_grid_cell_contains_xyz1(ecl_grid, int, double, double, double)")
_cell_regular = EclPrototype("bool ecl_grid_cell_regular1(ecl_grid, int)")
_num_lgr = EclPrototype("int ecl_grid_get_num_lgr(ecl_grid)")
_has_numbered_lgr = EclPrototype("bool ecl_grid_has_lgr_nr(ecl_grid, int)")
_has_named_lgr = EclPrototype("bool ecl_grid_has_lgr(ecl_grid, char*)")
_grid_value = EclPrototype("double ecl_grid_get_property(ecl_grid, ecl_kw, int, int, int)")
_get_cell_volume = EclPrototype("double ecl_grid_get_cell_volume1(ecl_grid, int)")
_get_cell_thickness = EclPrototype("double ecl_grid_get_cell_thickness1(ecl_grid, int)")
_get_cell_dx = EclPrototype("double ecl_grid_get_cell_dx1(ecl_grid, int)")
_get_cell_dy = EclPrototype("double ecl_grid_get_cell_dy1(ecl_grid, int)")
_get_depth = EclPrototype("double ecl_grid_get_cdepth1(ecl_grid, int)")
_fwrite_grdecl = EclPrototype("void ecl_grid_grdecl_fprintf_kw(ecl_grid, ecl_kw, char*, FILE, double)")
_load_column = EclPrototype("void ecl_grid_get_column_property(ecl_grid, ecl_kw, int, int, double_vector)")
_get_top = EclPrototype("double ecl_grid_get_top2(ecl_grid, int, int)")
_get_top1A = EclPrototype("double ecl_grid_get_top1A(ecl_grid, int)")
_get_bottom = EclPrototype("double ecl_grid_get_bottom2(ecl_grid, int, int)")
_locate_depth = EclPrototype("int ecl_grid_locate_depth(ecl_grid, double, int, int)")
_invalid_cell = EclPrototype("bool ecl_grid_cell_invalid1(ecl_grid, int)")
_valid_cell = EclPrototype("bool ecl_grid_cell_valid1(ecl_grid, int)")
_get_distance = EclPrototype("void ecl_grid_get_distance(ecl_grid, int, int, double*, double*, double*)")
_fprintf_grdecl2 = EclPrototype("void ecl_grid_fprintf_grdecl2(ecl_grid, FILE, ecl_unit_enum) ")
_fwrite_GRID2 = EclPrototype("void ecl_grid_fwrite_GRID2(ecl_grid, char*, ecl_unit_enum)")
_fwrite_EGRID2 = EclPrototype("void ecl_grid_fwrite_EGRID2(ecl_grid, char*, ecl_unit_enum)")
_equal = EclPrototype("bool ecl_grid_compare(ecl_grid, ecl_grid, bool, bool)")
_dual_grid = EclPrototype("bool ecl_grid_dual_grid(ecl_grid)")
_init_actnum = EclPrototype("void ecl_grid_init_actnum_data(ecl_grid, int*)")
_compressed_kw_copy = EclPrototype("void ecl_grid_compressed_kw_copy(ecl_grid, ecl_kw, ecl_kw)")
_global_kw_copy = EclPrototype("void ecl_grid_global_kw_copy(ecl_grid, ecl_kw, ecl_kw)")
_create_volume_keyword = EclPrototype("ecl_kw_obj ecl_grid_alloc_volume_kw(ecl_grid, bool)")
_use_mapaxes = EclPrototype("bool ecl_grid_use_mapaxes(ecl_grid)")
_export_coord = EclPrototype("ecl_kw_obj ecl_grid_alloc_coord_kw(ecl_grid)")
_export_zcorn = EclPrototype("ecl_kw_obj ecl_grid_alloc_zcorn_kw(ecl_grid)")
_export_actnum = EclPrototype("ecl_kw_obj ecl_grid_alloc_actnum_kw(ecl_grid)")
_export_mapaxes = EclPrototype("ecl_kw_obj ecl_grid_alloc_mapaxes_kw(ecl_grid)")
_get_unit_system = EclPrototype("ecl_unit_enum ecl_grid_get_unit_system(ecl_grid)")
_export_index_frame = EclPrototype("void ecl_grid_export_index(ecl_grid, int*, int*, bool)")
_export_data_as_int = EclPrototype("void ecl_grid_export_data_as_int(int, int*, ecl_kw, int*)", bind = False)
_export_data_as_double = EclPrototype("void ecl_grid_export_data_as_double(int, int*, ecl_kw, double*)", bind = False)
_export_volume = EclPrototype("void ecl_grid_export_volume(ecl_grid, int, int*, double*)")
_export_position = EclPrototype("void ecl_grid_export_position(ecl_grid, int, int*, double*)")
_export_corners = EclPrototype("void export_corners(ecl_grid, int, int*, double*)")
@classmethod
def load_from_grdecl(cls, filename):
"""Will create a new EclGrid instance from grdecl file.
This function will scan the input file @filename and look for
the keywords required to build a grid. The following keywords
are required:
SPECGRID ZCORN COORD
In addition the function will look for and use the ACTNUM and
MAPAXES keywords if they are found; if ACTNUM is not found all
cells are assumed to be active.
Slightly more exotic grid concepts like dual porosity, NNC
mapping, LGR and coarsened cells will be completely ignored;
if you need such concepts you must have an EGRID file and use
the default EclGrid() constructor - that is also considerably
faster.
"""
if os.path.isfile(filename):
with copen(filename) as f:
specgrid = EclKW.read_grdecl(f, "SPECGRID", ecl_type=EclDataType.ECL_INT, strict=False)
zcorn = EclKW.read_grdecl(f, "ZCORN")
coord = EclKW.read_grdecl(f, "COORD")
try:
actnum = EclKW.read_grdecl(f, "ACTNUM", ecl_type=EclDataType.ECL_INT)
except ValueError:
actnum = None
try:
mapaxes = EclKW.read_grdecl(f, "MAPAXES")
except ValueError:
mapaxes = None
return EclGrid.create(specgrid, zcorn, coord, actnum, mapaxes)
else:
raise IOError("No such file:%s" % filename)
@classmethod
def load_from_file(cls, filename):
"""
Will inspect the @filename argument and create a new EclGrid instance.
"""
if FortIO.isFortranFile(filename):
return EclGrid(filename)
else:
return EclGrid.loadFromGrdecl(filename)
@classmethod
def create(cls, specgrid, zcorn, coord, actnum, mapaxes=None):
"""
Create a new grid instance from existing keywords.
This is a class method which can be used to create an EclGrid
instance based on the EclKW instances @specgrid, @zcorn,
@coord and @actnum. An ECLIPSE EGRID file contains the
SPECGRID, ZCORN, COORD and ACTNUM keywords, so a somewhat
involved way to create a EclGrid instance could be:
file = ecl.EclFile("ECLIPSE.EGRID")
specgrid_kw = file.iget_named_kw("SPECGRID", 0)
zcorn_kw = file.iget_named_kw("ZCORN", 0)
coord_kw = file.iget_named_kw("COORD", 0)
actnum_kw = file.iget_named_kw("ACTNUM", 0)
grid = EclGrid.create(specgrid_kw, zcorn_kw, coord_kw, actnum_kw)
If you are so inclined ...
"""
return cls._grdecl_create(specgrid[0], specgrid[1], specgrid[2], zcorn, coord, actnum, mapaxes)
@classmethod
def create_rectangular(cls, dims, dV, actnum=None):
"""
Will create a new rectangular grid. @dims = (nx,ny,nz) @dV = (dx,dy,dz)
With the default value @actnum == None all cells will be active.
"""
warnings.warn("EclGrid.createRectangular is deprecated. " +
"Please used the similar method in EclGridGenerator!",
DeprecationWarning)
if actnum is None:
ecl_grid = cls._alloc_rectangular(dims[0], dims[1], dims[2], dV[0], dV[1], dV[2], None)
else:
if not isinstance(actnum, IntVector):
tmp = IntVector(initial_size=len(actnum))
for (index, value) in enumerate(actnum):
tmp[index] = value
actnum = tmp
if not len(actnum) == dims[0] * dims[1] * dims[2]:
raise ValueError("ACTNUM size mismatch: len(ACTNUM):%d Expected:%d" % (len(actnum), dims[0] * dims[1] * dims[2]))
ecl_grid = cls._alloc_rectangular(dims[0], dims[1], dims[2], dV[0], dV[1], dV[2], actnum.getDataPtr())
# If we have not succeeded in creating the grid we *assume* the
# error is due to a failed malloc.
if ecl_grid is None:
raise MemoryError("Failed to allocate regular grid")
return ecl_grid
def __init__(self, filename, apply_mapaxes=True):
"""
Will create a grid structure from an EGRID or GRID file.
"""
c_ptr = self._fread_alloc(filename, apply_mapaxes)
if c_ptr:
super(EclGrid, self).__init__(c_ptr)
else:
raise IOError("Loading grid from:%s failed" % filename)
def free(self):
self._free()
def _nicename(self):
"""name is often full path to grid, if so, output basename, else name"""
name = self.getName()
if os.path.isfile(name):
name = os.path.basename(name)
return name
def __repr__(self):
"""Returns, e.g.:
EclGrid("NORNE_ATW2013.EGRID", 46x112x22, global_size=113344, active_size=44431) at 0x28c4a70
"""
name = self._nicename()
if name:
name = '"%s", ' % name
g_size = self.getGlobalSize()
a_size = self.getNumActive()
xyz_s = '%dx%dx%d' % (self.getNX(),self.getNY(),self.getNZ())
return self._create_repr('%s%s, global_size=%d, active_size=%d' % (name, xyz_s, g_size, a_size))
def __len__(self):
"""
len(grid) will return the total number of cells.
"""
return self._get_global_size()
def equal(self, other, include_lgr=True, include_nnc=False, verbose=False):
"""
Compare the current grid with the other grid.
"""
if not isinstance(other, EclGrid):
raise TypeError("The other argument must be an EclGrid instance")
return self._equal(other, include_lgr, include_nnc, verbose)
def dual_grid(self):
"""Is this grid dual porosity model?"""
return self._dual_grid()
def get_dims(self):
"""A tuple of four elements: (nx, ny, nz, nactive)."""
return (self.getNX(),
self.getNY(),
self.getNZ(),
self.getNumActive())
@property
def nx(self):
return self._get_nx()
def get_nx(self):
""" The number of elements in the x direction"""
return self._get_nx()
@property
def ny(self):
return self._get_ny()
def get_ny(self):
""" The number of elements in the y direction"""
return self._get_ny()
@property
def nz(self):
return self._get_nz()
def get_nz(self):
""" The number of elements in the z direction"""
return self._get_nz()
def get_global_size(self):
"""Returns the total number of cells in this grid"""
return self._get_global_size()
def get_num_active(self):
"""The number of active cells in the grid."""
return self._get_active()
def get_num_active_fracture(self):
"""The number of active cells in the grid."""
return self._get_active_fracture()
def get_bounding_box_2d(self, layer=0, lower_left=None, upper_right=None):
if 0 <= layer <= self.getNZ():
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
if lower_left is None:
i1 = 0
j1 = 0
else:
i1,j1 = lower_left
if not 0 < i1 < self.getNX():
raise ValueError("lower_left i coordinate invalid")
if not 0 < j1 < self.getNY():
raise ValueError("lower_left j coordinate invalid")
if upper_right is None:
i2 = self.getNX()
j2 = self.getNY()
else:
i2,j2 = upper_right
if not 1 < i2 <= self.getNX():
raise ValueError("upper_right i coordinate invalid")
if not 1 < j2 <= self.getNY():
raise ValueError("upper_right j coordinate invalid")
if not i1 < i2:
raise ValueError("Must have lower_left < upper_right")
if not j1 < j2:
raise ValueError("Must have lower_left < upper_right")
self._get_corner_xyz(i1, j1, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
p0 = (x.value, y.value)
self._get_corner_xyz(i2, j1, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
p1 = (x.value, y.value )
self._get_corner_xyz( i2, j2, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
p2 = (x.value, y.value )
self._get_corner_xyz(i1, j2, layer, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
p3 = (x.value, y.value )
return (p0,p1,p2,p3)
else:
raise ValueError("Invalid layer value:%d Valid range: [0,%d]" % (layer, self.getNZ()))
def get_name(self):
"""
Name of the current grid, returns a string.
For the main grid this is the filename given to the
constructor when loading the grid; for an LGR this is the name
of the LGR. If the grid instance has been created with the
create() classmethod this can be None.
"""
n = self._get_name()
return str(n) if n else ''
def cell(self, global_index=None, active_index=None, i=None, j=None, k=None):
if global_index is not None:
return Cell(self, global_index)
if active_index is not None:
return Cell(self, self.global_index(active_index=active_index))
if i is not None:
return Cell(self, self.global_index(ijk=(i,j,k)))
def __getitem__(self, global_index):
if isinstance(global_index, tuple):
i,j,k = global_index
return self.cell(i=i, j=j, k=k)
return self.cell(global_index=global_index)
def __iter__(self):
for i in range(len(self)):
yield self[i]
def cells(self, active=False):
"""Iterator over all the (active) cells"""
if not active:
for c in self:
yield c
else:
for i in range(self.get_num_active()):
yield self.cell(active_index=i)
def global_index(self, active_index=None, ijk=None):
"""
Will convert either active_index or (i,j,k) to global index.
"""
return self.__global_index(active_index=active_index, ijk=ijk)
def __global_index(self, active_index=None, global_index=None, ijk=None):
"""
Will convert @active_index or @ijk to global_index.
This method will convert @active_index or @ijk to a global
index. Exactly one of the arguments @active_index,
@global_index or @ijk must be supplied.
The method is used extensively internally in the EclGrid
class; most methods which take coordinate input pass through
this method to normalize the coordinate representation.
"""
set_count = 0
if not active_index is None:
set_count += 1
if not global_index is None:
set_count += 1
if ijk:
set_count += 1
if not set_count == 1:
raise ValueError("Exactly one of the kewyord arguments active_index, global_index or ijk must be set")
if not active_index is None:
global_index = self._get_global_index1A( active_index)
elif ijk:
nx = self.getNX()
ny = self.getNY()
nz = self.getNZ()
i,j,k = ijk
if not 0 <= i < nx:
raise IndexError("Invalid value i:%d Range: [%d,%d)" % (i, 0, nx))
if not 0 <= j < ny:
raise IndexError("Invalid value j:%d Range: [%d,%d)" % (j, 0, ny))
if not 0 <= k < nz:
raise IndexError("Invalid value k:%d Range: [%d,%d)" % (k, 0, nz))
global_index = self._get_global_index3(i,j,k)
else:
if not 0 <= global_index < self.getGlobalSize():
raise IndexError("Invalid value global_index:%d Range: [%d,%d)" % (global_index, 0, self.getGlobalSize()))
return global_index
def get_active_index(self, ijk=None, global_index=None):
"""
Lookup active index based on ijk or global index.
Will determine the active_index of a cell, based on either
@ijk = (i,j,k) or @global_index. If the cell specified by the
input arguments is not active the function will return -1.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk)
return self._get_active_index1(gi)
def get_active_fracture_index(self, ijk=None, global_index=None):
"""
For dual porosity - get the active fracture index.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk)
return self._get_active_fracture_index1(gi)
def get_global_index1F(self, active_fracture_index):
"""
Will return the global index corresponding to active fracture index.
"""
return self._get_global_index1F(active_fracture_index)
def cell_invalid(self, ijk=None, global_index=None, active_index=None):
"""
Tries to check if a cell is invalid.
Cells which are used to represent numerical aquifers are
typically located in UTM position (0,0); these cells have
completely whacked up shape and size, and should **NOT** be
used in calculations involving real world coordinates. To
protect against this a heuristic is used to identify such cells
and mark them as invalid. There might be other sources than
numerical aquifers to this problem.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk, active_index=active_index)
return self._invalid_cell(gi)
def valid_cell_geometry(self, ijk=None, global_index=None, active_index=None):
"""Checks if the cell has valid geometry.
There are at least two reasons why a cell might have invalid
geometry:
1. In the case of GRID files it is not necessary to supply
the geometry for all the cells; in that case this
function will return false for cells which do not have
valid coordinates.
2. Cells which are used to represent numerical aquifers are
typically located in UTM position (0,0); these cells have
completely whacked up shape and size; these cells are
identified by a heuristic - which might fail
If the validCellGeometry() returns false for a particular
cell functions which calculate cell volumes, real world
coordinates and so on - should not be used.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk, active_index=active_index)
return self._valid_cell(gi)
def active(self, ijk=None, global_index=None):
"""
Is the cell active?
See documentation of get_xyz() for explanation of parameters
@ijk and @global_index.
"""
gi = self.__global_index(global_index=global_index, ijk=ijk)
active_index = self._get_active_index1(gi)
if active_index >= 0:
return True
else:
return False
def get_global_index(self, ijk=None, active_index=None):
"""
Lookup global index based on ijk or active index.
"""
gi = self.__global_index(active_index=active_index, ijk=ijk)
return gi
def get_ijk(self, active_index=None, global_index=None):
"""
Lookup (i,j,k) for a cell, based on either active index or global index.
The return value is a tuple with three elements (i,j,k).
"""
i = ctypes.c_int()
j = ctypes.c_int()
k = ctypes.c_int()
gi = self.__global_index(active_index=active_index, global_index=global_index)
self._get_ijk1(gi, ctypes.byref(i), ctypes.byref(j), ctypes.byref(k))
return (i.value, j.value, k.value)
def get_xyz(self, active_index=None, global_index=None, ijk=None):
"""
Find true position of cell center.
Will return world position of the center of a cell in the
grid. The return value is a tuple of three elements:
(utm_x, utm_y, depth).
The cells of a grid can be specified in three different ways:
(i,j,k) : As a tuple of i,j,k values.
global_index : A number in the range [0,nx*ny*nz). The
global index is related to (i,j,k) as:
global_index = i + j*nx + k*nx*ny
active_index : A number in the range [0,nactive).
For many of the EclGrid methods a cell can be specified using
any of these three methods. Observe that one and only method is
allowed:
OK:
pos1 = grid.get_xyz(active_index=100)
pos2 = grid.get_xyz(ijk=(10,20,7))
Crash and burn:
pos3 = grid.get_xyz(ijk=(10,20,7), global_index=10)
pos4 = grid.get_xyz()
All the indices in the EclGrid() class are zero offset, this
is in contrast to ECLIPSE which has an offset 1 interface.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_xyz1(gi, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
return (x.value, y.value, z.value)
def get_node_pos(self, i, j, k):
"""Will return the (x,y,z) for the node given by (i,j,k).
Observe that this method does not consider cells, but the
nodes in the grid. This means that the valid input range for
i,j and k are are upper end inclusive. To get the four
bounding points of the lower layer of the grid:
p0 = grid.getNodePos(0, 0, 0)
p1 = grid.getNodePos(grid.getNX(), 0, 0)
p2 = grid.getNodePos(0, grid.getNY(), 0)
p3 = grid.getNodePos(grid.getNX(), grid.getNY(), 0)
"""
if not 0 <= i <= self.getNX():
raise IndexError("Invalid I value:%d - valid range: [0,%d]" % (i, self.getNX()))
if not 0 <= j <= self.getNY():
raise IndexError("Invalid J value:%d - valid range: [0,%d]" % (j, self.getNY()))
if not 0 <= k <= self.getNZ():
raise IndexError("Invalid K value:%d - valid range: [0,%d]" % (k, self.getNZ()))
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_corner_xyz(i,j,k, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
return (x.value, y.value, z.value)
def get_cell_corner(self, corner_nr, active_index=None, global_index=None, ijk=None):
"""
Will look up xyz of corner nr @corner_nr
lower layer: upper layer
2---3 6---7
| | | |
0---1 4---5
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
x = ctypes.c_double()
y = ctypes.c_double()
z = ctypes.c_double()
self._get_cell_corner_xyz1(gi, corner_nr, ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
return (x.value, y.value, z.value)
def get_node_xyz(self, i,j,k):
"""
This function returns the position of Vertex (i,j,k).
The coordinates are in the inclusive interval [0,nx] x [0,ny] x [0,nz].
"""
nx = self.getNX()
ny = self.getNY()
nz = self.getNZ()
corner = 0
if i == nx:
i -= 1
corner += 1
if j == ny:
j -= 1
corner += 2
if k == nz:
k -= 1
corner += 4
if self._ijk_valid(i, j, k):
return self.getCellCorner(corner, global_index=i + j*nx + k*nx*ny)
else:
raise IndexError("Invalid coordinates: (%d,%d,%d) " % (i,j,k))
def get_layer_xyz(self, xy_corner, layer):
nx = self.getNX()
(j, i) = divmod(xy_corner, nx + 1)
k = layer
return self.getNodeXYZ(i,j,k)
def distance(self, global_index1, global_index2):
dx = ctypes.c_double()
dy = ctypes.c_double()
dz = ctypes.c_double()
self._get_distance(global_index1, global_index2, ctypes.byref(dx), ctypes.byref(dy), ctypes.byref(dz))
return (dx.value, dy.value, dz.value)
def depth(self, active_index=None, global_index=None, ijk=None):
"""
Depth of the center of a cell.
Returns the depth of the center of the cell given by
@active_index, @global_index or @ijk. See method get_xyz() for
documentation of @active_index, @global_index and @ijk.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
return self._get_depth( gi)
def top(self, i, j):
"""
Top of the reservoir; in the column (@i, @j).
Returns average depth of the four top corners.
"""
return self._get_top(i, j)
def top_active(self, i, j):
"""
Top of the active part of the reservoir; in the column (@i, @j).
Raises ValueError if (i,j) column is inactive.
"""
for k in range(self.getNZ()):
a_idx = self.get_active_index(ijk=(i,j,k))
if a_idx >= 0:
return self._get_top1A(a_idx)
raise ValueError('No active cell in column (%d,%d)' % (i,j))
def bottom(self, i, j):
"""
Bottom of the reservoir; in the column (@i, @j).
"""
return self._get_bottom( i, j)
def locate_depth(self, depth, i, j):
"""
Will locate the k value of cell containing specified depth.
Will scan through the grid column specified by the input
arguments @i and @j and search for a cell containing the depth
given by input argument @depth. The return value is the k
value of cell containing @depth.
If @depth is above the top of the reservoir the function will
return -1, and if @depth is below the bottom of the reservoir
the function will return -nz.
"""
return self._locate_depth( depth, i, j)
def find_cell(self, x, y, z, start_ijk=None):
"""
Lookup cell containing true position (x,y,z).
Will locate the cell in the grid which contains the true
position (@x,@y,@z), the return value is as a triplet
(i,j,k). The underlying C implementation is not very
efficient, and can potentially take quite a long time. If you
provide a good initial guess with the parameter @start_ijk (a
tuple (i,j,k)) things can speed up quite substantially.
If the location (@x,@y,@z) can not be found in the grid, the
method will return None.
"""
start_index = 0
if start_ijk:
start_index = self.__global_index(ijk=start_ijk)
global_index = self._get_ijk_xyz(x, y, z, start_index)
if global_index >= 0:
i = ctypes.c_int()
j = ctypes.c_int()
k = ctypes.c_int()
self._get_ijk1(global_index,
ctypes.byref(i), ctypes.byref(j), ctypes.byref(k))
return (i.value, j.value, k.value)
return None
def cell_contains(self, x, y, z, active_index=None, global_index=None, ijk=None):
"""
Will check if the cell contains point given by world
coordinates (x,y,z).
See method get_xyz() for documentation of @active_index,
@global_index and @ijk.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
return self._cell_contains(gi, x,y,z)
def find_cell_xy(self, x, y, k):
"""Will find the i,j of cell with utm coordinates x,y.
The @k input is the layer you are interested in, the allowed
values for k are [0,nz]. If the coordinates (x,y) are found to
be outside the grid a ValueError exception is raised.
"""
if 0 <= k <= self.getNZ():
i = ctypes.c_int()
j = ctypes.c_int()
ok = self._get_ij_xy(x,y,k, ctypes.byref(i), ctypes.byref(j))
if ok:
return (i.value, j.value)
else:
raise ValueError("Could not find the point:(%g,%g) in layer:%d" % (x,y,k))
else:
raise IndexError("Invalid layer value:%d" % k)
def find_cell_corner_xy(self, x, y, k):
"""Will find the corner nr of corner closest to utm coordinates x,y.
The @k input is the layer you are interested in, the allowed
values for k are [0,nz]. If the coordinates (x,y) are found to
be outside the grid a ValueError exception is raised.
"""
i,j = self.findCellXY(x,y,k)
if k == self.getNZ():
k -= 1
corner_shift = 4
else:
corner_shift = 0
nx = self.getNX()
x0,y0,z0 = self.getCellCorner(corner_shift, ijk=(i,j,k))
d0 = math.sqrt((x0 - x)*(x0 - x) + (y0 - y)*(y0 - y))
c0 = i + j*(nx + 1)
x1,y1,z1 = self.getCellCorner(1 + corner_shift, ijk=(i,j,k))
d1 = math.sqrt((x1 - x)*(x1 - x) + (y1 - y)*(y1 - y))
c1 = i + 1 + j*(nx + 1)
x2,y2,z2 = self.getCellCorner(2 + corner_shift, ijk=(i,j,k))
d2 = math.sqrt((x2 - x)*(x2 - x) + (y2 - y)*(y2 - y))
c2 = i + (j + 1)*(nx + 1)
x3,y3,z3 = self.getCellCorner(3 + corner_shift, ijk=(i,j,k))
d3 = math.sqrt((x3 - x)*(x3 - x) + (y3 - y)*(y3 - y))
c3 = i + 1 + (j + 1)*(nx + 1)
l = [(d0, c0), (d1,c1), (d2, c2), (d3,c3)]
l.sort(key=lambda k: k[0])
return l[0][1]
def cell_regular(self, active_index=None, global_index=None, ijk=None):
"""
The ECLIPSE grid models often contain various degenerate cells,
which are twisted, have overlapping corners or what not. This
function gives a moderate sanity check on a cell, essentially
what the function does is to check if the cell contains its
own centerpoint - which is actually not as trivial as it
sounds.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
return self._cell_regular( gi)
def cell_volume(self, active_index=None, global_index=None, ijk=None):
"""
Calculate the volume of a cell.
Will calculate the total volume of the cell. See method
get_xyz() for documentation of @active_index, @global_index
and @ijk.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
return self._get_cell_volume(gi)
def cell_dz(self, active_index=None, global_index=None, ijk=None):
"""
The thickness of a cell.
Will calculate the (average) thickness of the cell. See method
get_xyz() for documentation of @active_index, @global_index
and @ijk.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
return self._get_cell_thickness( gi)
def get_cell_dims(self, active_index=None, global_index=None, ijk=None):
"""Will return a tuple (dx,dy,dz) for cell dimension.
The dx and dy values are best-effort estimates of the cell size
along the i and j directions respectively. The three values
are guaranteed to satisfy:
dx * dy * dz = dV
See method get_xyz() for documentation of @active_index,
@global_index and @ijk.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
dx = self._get_cell_dx(gi)
dy = self._get_cell_dy(gi)
dz = self._get_cell_thickness( gi)
return (dx,dy,dz)
def get_num_lgr(self):
"""
How many LGRs are attached to this main grid?
How many LGRs are attached to this main grid; the grid
instance doing the query must itself be a main grid.
"""
return self._num_lgr()
def has_lgr(self, lgr_name):
"""
Query if the grid has an LGR with name @lgr_name.
"""
if self._has_named_lgr(lgr_name):
return True
else:
return False
def get_lgr(self, lgr_key):
"""Get EclGrid instance with LGR content.
Return an EclGrid instance based on the LGR @lgr, the input
argument can either be the name of an LGR or the grid number
of the LGR. The LGR grid instance is mostly like an ordinary
grid instance; the only difference is that it can not be used
for further queries about LGRs.
If the grid does not contain an LGR with this name/nr
exception KeyError will be raised.
"""
lgr = None
if isinstance(lgr_key, int):
if self._has_numbered_lgr(lgr_key):
lgr = self._get_numbered_lgr(lgr_key)
else:
if self._has_named_lgr(lgr_key):
lgr = self._get_named_lgr(lgr_key)
if lgr is None:
raise KeyError("No such LGR: %s" % lgr_key)
lgr.setParent(self)
return lgr
def get_cell_lgr(self, active_index=None, global_index=None, ijk=None):
"""
Get EclGrid instance located in cell.
Will query the current grid instance if the cell given by
@active_index, @global_index or @ijk has been refined with an
LGR. Will return None if the cell in question has not been
refined, the return value can be used for further queries.
See get_xyz() for documentation of the input parameters.
"""
gi = self.__global_index(ijk=ijk, active_index=active_index, global_index=global_index)
lgr = self._get_cell_lgr(gi)
if lgr:
lgr.setParent(self)
return lgr
else:
raise IndexError("No LGR defined for this cell")
def grid_value(self, kw, i, j, k):
"""
Will evaluate @kw in location (@i,@j,@k).
The ECLIPSE properties and solution vectors are stored in
restart and init files as 1D vectors of length nx*ny*nz or
nactive. The grid_value() method is a minor convenience
function to convert the (@i,@j,@k) input values to an
appropriate 1D index.
Depending on the length of kw the input arguments are
converted either to an active index or to a global index. If
the length of kw does not fit with either the global size of
the grid or the active size of the grid things will fail hard.
"""
return self._grid_value(kw, i, j, k)
def load_column(self, kw, i, j, column):
"""
Load the values of @kw from the column specified by (@i,@j).
The method will scan through all k values of the input field
@kw for fixed values of i and j. The size of @kw must be
either nactive or nx*ny*nz.
The input argument @column should be a DoubleVector instance,
observe that if size of @kw == nactive k values corresponding
to inactive cells will not be modified in the @column
instance; in that case it is important that @column is
initialized with a suitable default value.
"""
self._load_column( kw, i, j, column)
def create_kw(self, array, kw_name, pack):
"""
Creates an EclKW instance based on existing 3D numpy object.
The method create3D() does the inverse operation; creating a
3D numpy object from an EclKW instance. If the argument @pack
is true the resulting keyword will have length 'nactive',
otherwise it will have length nx*ny*nz.
"""
if array.ndim == 3:
dims = array.shape
if dims[0] == self.getNX() and dims[1] == self.getNY() and dims[2] == self.getNZ():
dtype = array.dtype
if dtype == numpy.int32:
type = EclDataType.ECL_INT
elif dtype == numpy.float32:
type = EclDataType.ECL_FLOAT
elif dtype == numpy.float64:
type = EclDataType.ECL_DOUBLE
else:
sys.exit("Do not know how to create ecl_kw from type:%s" % dtype)
if pack:
size = self.getNumActive()
else:
size = self.getGlobalSize()
if len(kw_name) > 8:
# Silently truncate to length 8 - ECLIPSE has its challenges.
kw_name = kw_name[0:8]
kw = EclKW(kw_name, size, type)
active_index = 0
global_index = 0
for k in range(self.getNZ()):
for j in range(self.getNY()):
for i in range(self.getNX()):
if pack:
if self.active(global_index=global_index):
kw[active_index] = array[i,j,k]
active_index += 1
else:
if dtype == numpy.int32:
kw[global_index] = int(array[i,j,k])
else:
kw[global_index] = array[i,j,k]
global_index += 1
return kw
raise ValueError("Wrong size / dimension on array")
def coarse_groups(self):
"""
Will return the number of coarse groups in this grid.
"""
return self._num_coarse_groups()
def in_coarse_group(self, global_index=None, ijk=None, active_index=None):
"""
Will return True or False if the cell is part of coarse group.
"""
global_index = self.__global_index(active_index=active_index, ijk=ijk, global_index=global_index)
return self._in_coarse_group1(global_index)
def create_3d(self, ecl_kw, default = 0):
"""
Creates a 3D numpy array object with the data from @ecl_kw.
Observe that 3D numpy object is a copy of the data in the
EclKW instance, i.e. modification to the numpy object will not
be reflected in the ECLIPSE keyword.
The methods createKW() does the inverse operation; creating an
EclKW instance from a 3D numpy object.
Alternative: Creating the numpy array object is not very
efficient; if you only need a limited number of elements from
the ecl_kw instance it might be wiser to use the grid_value()
method:
value = grid.grid_value(ecl_kw, i, j, k)
"""
if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():
array = numpy.ones([ self.getGlobalSize() ], dtype=ecl_kw.dtype) * default
kwa = ecl_kw.array
if len(ecl_kw) == self.getGlobalSize():
for i in range(kwa.size):
array[i] = kwa[i]
else:
for global_index in range(self.getGlobalSize()):
active_index = self._get_active_index1(global_index)
array[global_index] = kwa[active_index]
array = array.reshape([self.getNX(), self.getNY(), self.getNZ()], order='F')
return array
else:
err_msg_fmt = 'Keyword "%s" has invalid size %d; must be either nactive=%d or nx*ny*nz=%d'
err_msg = err_msg_fmt % (ecl_kw, len(ecl_kw), self.getNumActive(),
self.getGlobalSize())
raise ValueError(err_msg)
def save_grdecl(self, pyfile, output_unit=EclUnitTypeEnum.ECL_METRIC_UNITS):
"""
Will write the grid content as grdecl formatted keywords.
Will only write the main grid.
"""
cfile = CFILE(pyfile)
self._fprintf_grdecl2(cfile, output_unit)
def save_EGRID(self, filename, output_unit=None):
if output_unit is None:
output_unit = self.unit_system
self._fwrite_EGRID2(filename, output_unit)
def save_GRID(self, filename, output_unit=EclUnitTypeEnum.ECL_METRIC_UNITS):
"""
Will save the current grid as a GRID file.
"""
self._fwrite_GRID2( filename, output_unit)
def write_grdecl(self, ecl_kw, pyfile, special_header=None, default_value=0):
"""
Writes an EclKW instance as an ECLIPSE grdecl formatted file.
The input argument @ecl_kw must be an EclKW instance of size
nactive or nx*ny*nz. If the size is nactive the inactive cells
will be filled with @default_value; hence the function will
always write nx*ny*nz elements.
The data in the @ecl_kw argument can be of type integer,
float, double or bool. In the case of bool the default value
must be specified as 1 (True) or 0 (False).
The input argument @pyfile should be a valid python filehandle
opened for writing; i.e.
pyfile = open("PORO.GRDECL", "w")
grid.write_grdecl(poro_kw , pyfile, default_value=0.0)
grid.write_grdecl(permx_kw, pyfile, default_value=0.0)
pyfile.close()
"""
if len(ecl_kw) == self.getNumActive() or len(ecl_kw) == self.getGlobalSize():
cfile = CFILE(pyfile)
self._fwrite_grdecl(ecl_kw, special_header, cfile, default_value)
else:
raise ValueError("Keyword: %s has invalid size(%d), must be either nactive:%d or nx*ny*nz:%d" % (ecl_kw.getName(), len(ecl_kw), self.getNumActive(), self.getGlobalSize()))
def exportACTNUM(self):
actnum = IntVector(initial_size=self.getGlobalSize())
self._init_actnum(actnum.getDataPtr())
return actnum
def compressed_kw_copy(self, kw):
if len(kw) == self.getNumActive():
return kw.copy()
elif len(kw) == self.getGlobalSize():
kw_copy = EclKW(kw.getName(), self.getNumActive(), kw.data_type)
self._compressed_kw_copy(kw_copy, kw)
return kw_copy
else:
raise ValueError("The input keyword must have nx*n*nz or nactive elements. Size:%d invalid" % len(kw))
def global_kw_copy(self, kw, default_value):
if len(kw) == self.getGlobalSize():
return kw.copy()
elif len(kw) == self.getNumActive():
kw_copy = EclKW(kw.getName(), self.getGlobalSize(), kw.data_type)
kw_copy.assign(default_value)
self._global_kw_copy(kw_copy, kw)
return kw_copy
else:
raise ValueError("The input keyword must have nx*n*nz or nactive elements. Size:%d invalid" % len(kw))
def export_ACTNUM_kw(self):
actnum = EclKW("ACTNUM", self.getGlobalSize(), EclDataType.ECL_INT)
self._init_actnum(actnum.getDataPtr())
return actnum
def create_volume_keyword(self, active_size=True):
"""Will create a EclKW initialized with cell volumes.
The purpose of this method is to create a EclKW instance which
is initialized with all the cell volumes, this can then be
used to perform volume summation; i.e. to calculate the total
oil volume:
soil = 1 - sgas - swat
cell_volume = grid.createVolumeKeyword()
tmp = cell_volume * soil
oip = tmp.sum()
The oil in place calculation shown above could easily be
implemented by iterating over the soil kw, however using the
volume keyword has two advantages:
1. The calculation of cell volumes is quite time consuming,
by storing the results in a kw they can be reused.
2. By using the compact form 'oip = cell_volume * soil' the
inner loop iteration will go in C - which is faster.
By default the kw will only have values for the active cells,
but by setting the optional variable @active_size to False you
will get volume values for all cells in the grid.
"""
return self._create_volume_keyword(active_size)
def export_index(self, active_only = False):
"""
Exports a pandas dataframe containing index data of grid cells.
The global_index of the cells is used as index in the pandas frame.
columns 0, 1, 2 are i, j, k, respectively
column 3 contains the active_index
if active_only == True, only active cells are listed,
otherwise all cells are listed.
This index frame should typically be passed to the export_data(),
export_volume() and export_corners() functions.
"""
if active_only:
size = self.get_num_active()
else:
size = self.get_global_size()
indx = numpy.zeros(size, dtype=numpy.int32)
data = numpy.zeros([size, 4], dtype=numpy.int32)
self._export_index_frame( indx.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)), active_only )
df = pandas.DataFrame(data=data, index=indx, columns=['i', 'j', 'k', 'active'])
return df
def export_data(self, index_frame, kw, default = 0):
"""
Exports keyword data to a numpy vector.
index_frame must be a pandas dataframe with the same structure
as obtained from export_index.
kw must have size of either global_size or num_active.
The length of the numpy vector is the number of rows in index_frame.
If kw is of length num_active, values in the output vector
corresponding to inactive cells are set to default.
"""
if not isinstance(index_frame, pandas.DataFrame):
raise TypeError("index_frame must be pandas.DataFrame")
if len(kw) == self.get_global_size():
index = numpy.array( index_frame.index, dtype=numpy.int32 )
elif len(kw) == self.get_num_active():
index = numpy.array( index_frame["active"], dtype=numpy.int32 )
else:
raise ValueError("The keyword must have a 3D compatible length")
if kw.type is EclTypeEnum.ECL_INT_TYPE:
data = numpy.full( len(index), default, dtype=numpy.int32 )
self._export_data_as_int( len(index),
index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
kw,
data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)) )
return data
elif kw.type is EclTypeEnum.ECL_FLOAT_TYPE or kw.type is EclTypeEnum.ECL_DOUBLE_TYPE:
data = numpy.full( len(index), default, dtype=numpy.float64 )
self._export_data_as_double( len(index),
index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
kw,
data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) )
return data
else:
raise TypeError("Keyword must be either int, float or double.")
def export_volume(self, index_frame):
"""
Exports cell volume data to a numpy vector.
index_frame must be a pandas dataframe with the same structure
as obtained from export_index.
"""
index = numpy.array( index_frame.index, dtype=numpy.int32 )
data = numpy.zeros( len(index ), dtype=numpy.float64 )
self._export_volume( len(index),
index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) )
return data
def export_position(self, index_frame):
"""Exports cell position coordinates to a numpy vector (matrix), with columns
0, 1, 2 denoting coordinates x, y, and z, respectively.
index_frame must be a pandas dataframe with the same structure
as obtained from export_index.
"""
index = numpy.array( index_frame.index, dtype=numpy.int32 )
data = numpy.zeros( [len(index), 3], dtype=numpy.float64 )
self._export_position( len(index),
index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) )
return data
def export_corners(self, index_frame):
"""Exports cell corner position coordinates to a numpy vector (matrix).
index_frame must be a pandas dataframe with the same structure
as obtained from export_index.
Example of a row of the output matrix:
0 1 2 .... 21 22 23
x1 y1 z1 .... x8 y8 z8
In total there are eight corners. They are described as follows:
The corners in a cell are numbered 0 - 7, where corners 0-3 constitute
one layer and the corners 4-7 constitute the other layer. Observe
that the numbering does not follow a consistent rotation around the face:
j
6---7 /|\
| | |
4---5 |
|
o----------> i
2---3
| |
0---1
Many grids are left-handed, i.e. the direction of increasing z will
point down towards the center of the earth. Hence in the figure above
the layer 4-7 will be deeper down in the reservoir than layer 0-3, and
also have higher z-value.
Warning: The main author of this code suspects that the coordinate
system can be right-handed as well, giving a z axis which will
increase 'towards the sky'; the safest way is probably to check this
explicitly if it matters for the case at hand.
"""
index = numpy.array( index_frame.index, dtype=numpy.int32 )
data = numpy.zeros( [len(index), 24], dtype=numpy.float64 )
self._export_corners( len(index),
index.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) )
return data
def export_coord(self):
return self._export_coord()
def export_zcorn(self):
return self._export_zcorn()
def export_actnum(self):
return self._export_actnum()
def export_mapaxes(self):
if not self._use_mapaxes():
return None
return self._export_mapaxes()
@property
def unit_system(self):
return self._get_unit_system()
monkey_the_camel(EclGrid, 'loadFromGrdecl', EclGrid.load_from_grdecl, classmethod)
monkey_the_camel(EclGrid, 'loadFromFile', EclGrid.load_from_file, classmethod)
monkey_the_camel(EclGrid, 'createRectangular', EclGrid.create_rectangular, classmethod)
monkey_the_camel(EclGrid, 'dualGrid', EclGrid.dual_grid)
monkey_the_camel(EclGrid, 'getDims', EclGrid.get_dims)
monkey_the_camel(EclGrid, 'getNX', EclGrid.get_nx)
monkey_the_camel(EclGrid, 'getNY', EclGrid.get_ny)
monkey_the_camel(EclGrid, 'getNZ', EclGrid.get_nz)
monkey_the_camel(EclGrid, 'getGlobalSize', EclGrid.get_global_size)
monkey_the_camel(EclGrid, 'getNumActive', EclGrid.get_num_active)
monkey_the_camel(EclGrid, 'getNumActiveFracture', EclGrid.get_num_active_fracture)
monkey_the_camel(EclGrid, 'getBoundingBox2D', EclGrid.get_bounding_box_2d)
monkey_the_camel(EclGrid, 'getName', EclGrid.get_name)
monkey_the_camel(EclGrid, 'validCellGeometry', EclGrid.valid_cell_geometry)
monkey_the_camel(EclGrid, 'getNodePos', EclGrid.get_node_pos)
monkey_the_camel(EclGrid, 'getCellCorner', EclGrid.get_cell_corner)
monkey_the_camel(EclGrid, 'getNodeXYZ', EclGrid.get_node_xyz)
monkey_the_camel(EclGrid, 'getLayerXYZ', EclGrid.get_layer_xyz)
monkey_the_camel(EclGrid, 'findCellXY', EclGrid.find_cell_xy)
monkey_the_camel(EclGrid, 'findCellCornerXY', EclGrid.find_cell_corner_xy)
monkey_the_camel(EclGrid, 'getCellDims', EclGrid.get_cell_dims)
monkey_the_camel(EclGrid, 'getNumLGR', EclGrid.get_num_lgr)
monkey_the_camel(EclGrid, 'createKW', EclGrid.create_kw)
monkey_the_camel(EclGrid, 'create3D', EclGrid.create_3d)
monkey_the_camel(EclGrid, 'compressedKWCopy', EclGrid.compressed_kw_copy)
monkey_the_camel(EclGrid, 'globalKWCopy', EclGrid.global_kw_copy)
monkey_the_camel(EclGrid, 'exportACTNUMKw', EclGrid.export_ACTNUM_kw)
monkey_the_camel(EclGrid, 'createVolumeKeyword', EclGrid.create_volume_keyword)
| gpl-3.0 |
gobabiertoAR/data-cleaner | data_cleaner/fingerprint_keyer.py | 1 | 3587 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Implementa funciones para clusterizar strings.
Utiliza el algoritmo Key Collision: Fingerprint.
"""
import string
from unidecode import unidecode
from functools import partial
import pandas as pd
def fingerprint_keyer(key_string, sort_tokens=False, remove_duplicates=False):
"""Convierte un string en su fingerprint key representation.
Args:
key_string (str): String para convertir en fingerprint key.
Returns:
str: Fingerprint correspondiente al input.
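Example (illustrative):
    fingerprint_keyer("  El  Gato; negro ")               # -> "el gato negro"
    fingerprint_keyer("negro el gato", sort_tokens=True)  # -> "el gato negro"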
"""
if pd.isnull(key_string):
return pd.np.nan
# enforce string type
if not isinstance(key_string, str):
key_string = str(key_string)
# remove leading and trailing whitespace, go to lowercase
key_string = key_string.strip().lower()
# remove all punctuation and control characters
for punct in (set(key_string) & set(string.punctuation)):
key_string = key_string.replace(punct, "")
key_string = key_string.replace("\t", " ")
# split the string into whitespace-separated tokens
split_key = key_string.split()
# remove duplicates, if chosen
if remove_duplicates:
dups_removed = set(split_key)
else:
dups_removed = split_key
# sort the tokens, if chosen
if sort_tokens:
# sort the tokens
sorted_split_key = sorted(dups_removed)
else:
sorted_split_key = dups_removed
# join the tokens back together
finger_printed_key = " ".join(sorted_split_key)
# normalize extended western characters to their ASCII
# representation (for example "gödel" → "godel")
return unidecode(finger_printed_key)
def group_fingerprint_strings(raw_strs, sort_tokens=False,
remove_duplicates=False):
"""Clusteriza un conjunto de strings, según sus fingerprints.
Args:
raw_strs (list): Lista de strings sin procesar.
Returns:
(dict, dict): En el primer dict las keys son los fingerprints y los
valores las strings originales. En el segundo las keys son las
strings sin normalizar y los valores el conteo de la cantidad de
veces que aparecen.
"""
res = {}
counts = {}
fingerprint_keyer_with_args = partial(fingerprint_keyer,
sort_tokens=sort_tokens,
remove_duplicates=remove_duplicates)
for (key, raw_str) in zip(map(fingerprint_keyer_with_args, raw_strs),
raw_strs):
res[key] = res.get(key, []) + [raw_str]
counts[raw_str] = counts.get(raw_str, 0) + 1
return res, counts
def get_best_replacements(clusters, counts):
"""Selecciona los strings más utilizados por cluster.
Itera por cada cluster para determinar la mejor string para reemplazar las
strings originales. De momento solo utiliza un conteo simple pero podria
agregarse algun criterio basado en la capitalizacion de las palabras
Args:
clusters (dict): {fingerprint: [raw_string_1, raw_string_2]}
counts (dict): {raw_string: cant_veces_utilizada}
Returns:
dict: {fingerprint: string_mas_usada_para_esa_fingerprint}
"""
res = {}
for (fingerprint, key_strings) in clusters.items():
res[fingerprint] = max(key_strings, key=lambda s: counts[s])
return res
def replace_by_key(replacements, raw_strs):
"""Reemplaza strings por sus mejores equivalentes."""
return [replacements.get(fingerprint_keyer(s), s) for s in raw_strs]
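# Illustrative usage sketch (relies only on the functions defined above):
# cluster a handful of raw strings, pick the most frequent variant per
# cluster and normalize the original list with it.
if __name__ == "__main__":
    raw = ["Buenos Aires", "buenos aires ", "Buenos Aires",
           "La Pampa", "la  pampa!"]
    clusters, counts = group_fingerprint_strings(raw)
    replacements = get_best_replacements(clusters, counts)
    # prints ['Buenos Aires', 'Buenos Aires', 'Buenos Aires',
    #         'La Pampa', 'La Pampa']
    print(replace_by_key(replacements, raw))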
| gpl-3.0 |
pv/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topic using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
  frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information-theoretic evaluation scores: they are based only on
cluster assignments rather than distances, and are hence not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
DGrady/pandas | pandas/tests/test_multilevel.py | 2 | 106590 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101,W0141
from warnings import catch_warnings
import datetime
import itertools
import pytest
import pytz
from numpy.random import randn
import numpy as np
from pandas.core.index import Index, MultiIndex
from pandas import Panel, DataFrame, Series, notna, isna, Timestamp
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.compat import (range, lrange, StringIO, lzip, u, product as
cart_product, zip)
import pandas as pd
import pandas._libs.index as _index
class Base(object):
def setup_method(self, method):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.frame = DataFrame(np.random.randn(10, 3), index=index,
columns=Index(['A', 'B', 'C'], name='exp'))
self.single_level = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
# create test series object
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
s[3] = np.NaN
self.series = s
tm.N = 100
self.tdf = tm.makeTimeDataFrame()
self.ymd = self.tdf.groupby([lambda x: x.year, lambda x: x.month,
lambda x: x.day]).sum()
# use Int64Index, to make sure things work
self.ymd.index.set_levels([lev.astype('i8')
for lev in self.ymd.index.levels],
inplace=True)
self.ymd.index.set_names(['year', 'month', 'day'], inplace=True)
class TestMultiLevel(Base):
def test_append(self):
a, b = self.frame[:5], self.frame[5:]
result = a.append(b)
tm.assert_frame_equal(result, self.frame)
result = a['A'].append(b['A'])
tm.assert_series_equal(result, self.frame['A'])
def test_append_index(self):
idx1 = Index([1.1, 1.2, 1.3])
idx2 = pd.date_range('2011-01-01', freq='D', periods=3,
tz='Asia/Tokyo')
idx3 = Index(['A', 'B', 'C'])
midx_lv2 = MultiIndex.from_arrays([idx1, idx2])
midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3])
result = idx1.append(midx_lv2)
# see gh-7112
tz = pytz.timezone('Asia/Tokyo')
expected_tuples = [(1.1, tz.localize(datetime.datetime(2011, 1, 1))),
(1.2, tz.localize(datetime.datetime(2011, 1, 2))),
(1.3, tz.localize(datetime.datetime(2011, 1, 3)))]
expected = Index([1.1, 1.2, 1.3] + expected_tuples)
tm.assert_index_equal(result, expected)
result = midx_lv2.append(idx1)
expected = Index(expected_tuples + [1.1, 1.2, 1.3])
tm.assert_index_equal(result, expected)
result = midx_lv2.append(midx_lv2)
expected = MultiIndex.from_arrays([idx1.append(idx1),
idx2.append(idx2)])
tm.assert_index_equal(result, expected)
result = midx_lv2.append(midx_lv3)
tm.assert_index_equal(result, expected)
result = midx_lv3.append(midx_lv2)
expected = Index._simple_new(
np.array([(1.1, tz.localize(datetime.datetime(2011, 1, 1)), 'A'),
(1.2, tz.localize(datetime.datetime(2011, 1, 2)), 'B'),
(1.3, tz.localize(datetime.datetime(2011, 1, 3)), 'C')] +
expected_tuples), None)
tm.assert_index_equal(result, expected)
def test_dataframe_constructor(self):
multi = DataFrame(np.random.randn(4, 4),
index=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
assert isinstance(multi.index, MultiIndex)
assert not isinstance(multi.columns, MultiIndex)
multi = DataFrame(np.random.randn(4, 4),
columns=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
assert isinstance(multi.columns, MultiIndex)
def test_series_constructor(self):
multi = Series(1., index=[np.array(['a', 'a', 'b', 'b']), np.array(
['x', 'y', 'x', 'y'])])
assert isinstance(multi.index, MultiIndex)
multi = Series(1., index=[['a', 'a', 'b', 'b'], ['x', 'y', 'x', 'y']])
assert isinstance(multi.index, MultiIndex)
multi = Series(lrange(4), index=[['a', 'a', 'b', 'b'],
['x', 'y', 'x', 'y']])
assert isinstance(multi.index, MultiIndex)
def test_reindex_level(self):
# axis=0
month_sums = self.ymd.sum(level='month')
result = month_sums.reindex(self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum)
tm.assert_frame_equal(result, expected)
# Series
result = month_sums['A'].reindex(self.ymd.index, level=1)
expected = self.ymd['A'].groupby(level='month').transform(np.sum)
tm.assert_series_equal(result, expected, check_names=False)
# axis=1
month_sums = self.ymd.T.sum(axis=1, level='month')
result = month_sums.reindex(columns=self.ymd.index, level=1)
expected = self.ymd.groupby(level='month').transform(np.sum).T
tm.assert_frame_equal(result, expected)
def test_binops_level(self):
def _check_op(opname):
op = getattr(DataFrame, opname)
month_sums = self.ymd.sum(level='month')
result = op(self.ymd, month_sums, level='month')
broadcasted = self.ymd.groupby(level='month').transform(np.sum)
expected = op(self.ymd, broadcasted)
tm.assert_frame_equal(result, expected)
# Series
op = getattr(Series, opname)
result = op(self.ymd['A'], month_sums['A'], level='month')
broadcasted = self.ymd['A'].groupby(level='month').transform(
np.sum)
expected = op(self.ymd['A'], broadcasted)
expected.name = 'A'
tm.assert_series_equal(result, expected)
_check_op('sub')
_check_op('add')
_check_op('mul')
_check_op('div')
def test_pickle(self):
def _test_roundtrip(frame):
unpickled = tm.round_trip_pickle(frame)
tm.assert_frame_equal(frame, unpickled)
_test_roundtrip(self.frame)
_test_roundtrip(self.frame.T)
_test_roundtrip(self.ymd)
_test_roundtrip(self.ymd.T)
def test_reindex(self):
expected = self.frame.iloc[[0, 3]]
reindexed = self.frame.loc[[('foo', 'one'), ('bar', 'one')]]
tm.assert_frame_equal(reindexed, expected)
with catch_warnings(record=True):
reindexed = self.frame.ix[[('foo', 'one'), ('bar', 'one')]]
tm.assert_frame_equal(reindexed, expected)
def test_reindex_preserve_levels(self):
new_index = self.ymd.index[::10]
chunk = self.ymd.reindex(new_index)
assert chunk.index is new_index
chunk = self.ymd.loc[new_index]
assert chunk.index is new_index
with catch_warnings(record=True):
chunk = self.ymd.ix[new_index]
assert chunk.index is new_index
ymdT = self.ymd.T
chunk = ymdT.reindex(columns=new_index)
assert chunk.columns is new_index
chunk = ymdT.loc[:, new_index]
assert chunk.columns is new_index
def test_repr_to_string(self):
repr(self.frame)
repr(self.ymd)
repr(self.frame.T)
repr(self.ymd.T)
buf = StringIO()
self.frame.to_string(buf=buf)
self.ymd.to_string(buf=buf)
self.frame.T.to_string(buf=buf)
self.ymd.T.to_string(buf=buf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples([('a', 0, 'foo'), ('b', 1, 'bar')],
names=['a', 'b', 'c'])
df = DataFrame({'value': [0, 1]}, index=index)
lines = repr(df).split('\n')
assert lines[2].startswith('a 0 foo')
def test_getitem_simple(self):
df = self.frame.T
col = df['foo', 'one']
tm.assert_almost_equal(col.values, df.values[:, 0])
with pytest.raises(KeyError):
df[('foo', 'four')]
with pytest.raises(KeyError):
df['foobar']
def test_series_getitem(self):
s = self.ymd['A']
result = s[2000, 3]
# TODO(wesm): unused?
# result2 = s.loc[2000, 3]
expected = s.reindex(s.index[42:65])
expected.index = expected.index.droplevel(0).droplevel(0)
tm.assert_series_equal(result, expected)
result = s[2000, 3, 10]
expected = s[49]
assert result == expected
# fancy
expected = s.reindex(s.index[49:51])
result = s.loc[[(2000, 3, 10), (2000, 3, 13)]]
tm.assert_series_equal(result, expected)
with catch_warnings(record=True):
result = s.ix[[(2000, 3, 10), (2000, 3, 13)]]
tm.assert_series_equal(result, expected)
# key error
pytest.raises(KeyError, s.__getitem__, (2000, 3, 4))
def test_series_getitem_corner(self):
s = self.ymd['A']
# don't segfault, GH #495
# out of bounds access
pytest.raises(IndexError, s.__getitem__, len(self.ymd))
# generator
result = s[(x > 0 for x in s)]
expected = s[s > 0]
tm.assert_series_equal(result, expected)
def test_series_setitem(self):
s = self.ymd['A']
s[2000, 3] = np.nan
assert isna(s.values[42:65]).all()
assert notna(s.values[:42]).all()
assert notna(s.values[65:]).all()
s[2000, 3, 10] = np.nan
assert isna(s[49])
def test_series_slice_partial(self):
pass
def test_frame_getitem_setitem_boolean(self):
df = self.frame.T.copy()
values = df.values
result = df[df > 0]
expected = df.where(df > 0)
tm.assert_frame_equal(result, expected)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
with tm.assert_raises_regex(TypeError, 'boolean values only'):
df[df * 0] = 2
def test_frame_getitem_setitem_slice(self):
# getitem
result = self.frame.iloc[:4]
expected = self.frame[:4]
tm.assert_frame_equal(result, expected)
# setitem
cp = self.frame.copy()
cp.iloc[:4] = 0
assert (cp.values[:4] == 0).all()
assert (cp.values[4:] != 0).all()
def test_frame_getitem_setitem_multislice(self):
levels = [['t1', 't2'], ['a', 'b', 'c']]
labels = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
midx = MultiIndex(labels=labels, levels=levels, names=[None, 'id'])
df = DataFrame({'value': [1, 2, 3, 7, 8]}, index=midx)
result = df.loc[:, 'value']
tm.assert_series_equal(df['value'], result)
with catch_warnings(record=True):
result = df.ix[:, 'value']
tm.assert_series_equal(df['value'], result)
result = df.loc[df.index[1:3], 'value']
tm.assert_series_equal(df['value'][1:3], result)
result = df.loc[:, :]
tm.assert_frame_equal(df, result)
result = df
df.loc[:, 'value'] = 10
result['value'] = 10
tm.assert_frame_equal(df, result)
df.loc[:, :] = 10
tm.assert_frame_equal(df, result)
def test_frame_getitem_multicolumn_empty_level(self):
f = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']})
f.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'],
['level3 item1', 'level3 item2']]
result = f['level1 item1']
expected = DataFrame([['1'], ['2'], ['3']], index=f.index,
columns=['level3 item1'])
tm.assert_frame_equal(result, expected)
def test_frame_setitem_multi_column(self):
df = DataFrame(randn(10, 4), columns=[['a', 'a', 'b', 'b'],
[0, 1, 0, 1]])
cp = df.copy()
cp['a'] = cp['b']
tm.assert_frame_equal(cp['a'], cp['b'])
# set with ndarray
cp = df.copy()
cp['a'] = cp['b'].values
tm.assert_frame_equal(cp['a'], cp['b'])
# ---------------------------------------
# #1803
columns = MultiIndex.from_tuples([('A', '1'), ('A', '2'), ('B', '1')])
df = DataFrame(index=[1, 3, 5], columns=columns)
# Works, but adds a column instead of updating the two existing ones
df['A'] = 0.0 # Doesn't work
assert (df['A'].values == 0).all()
# it broadcasts
df['B', '1'] = [1, 2, 3]
df['A'] = df['B', '1']
sliced_a1 = df['A', '1']
sliced_a2 = df['A', '2']
sliced_b1 = df['B', '1']
tm.assert_series_equal(sliced_a1, sliced_b1, check_names=False)
tm.assert_series_equal(sliced_a2, sliced_b1, check_names=False)
assert sliced_a1.name == ('A', '1')
assert sliced_a2.name == ('A', '2')
assert sliced_b1.name == ('B', '1')
def test_getitem_tuple_plus_slice(self):
# GH #671
df = DataFrame({'a': lrange(10),
'b': lrange(10),
'c': np.random.randn(10),
'd': np.random.randn(10)})
idf = df.set_index(['a', 'b'])
result = idf.loc[(0, 0), :]
expected = idf.loc[0, 0]
expected2 = idf.xs((0, 0))
with catch_warnings(record=True):
expected3 = idf.ix[0, 0]
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected2)
tm.assert_series_equal(result, expected3)
def test_getitem_setitem_tuple_plus_columns(self):
# GH #1013
df = self.ymd[:5]
result = df.loc[(2000, 1, 6), ['A', 'B', 'C']]
expected = df.loc[2000, 1, 6][['A', 'B', 'C']]
tm.assert_series_equal(result, expected)
def test_xs(self):
xs = self.frame.xs(('bar', 'two'))
xs2 = self.frame.loc[('bar', 'two')]
tm.assert_series_equal(xs, xs2)
tm.assert_almost_equal(xs.values, self.frame.values[4])
# GH 6574
# missing values in returned index should be preserved
acc = [
('a', 'abcde', 1),
('b', 'bbcde', 2),
('y', 'yzcde', 25),
('z', 'xbcde', 24),
('z', None, 26),
('z', 'zbcde', 25),
('z', 'ybcde', 26),
]
df = DataFrame(acc,
columns=['a1', 'a2', 'cnt']).set_index(['a1', 'a2'])
expected = DataFrame({'cnt': [24, 26, 25, 26]}, index=Index(
['xbcde', np.nan, 'zbcde', 'ybcde'], name='a2'))
result = df.xs('z', level='a1')
tm.assert_frame_equal(result, expected)
def test_xs_partial(self):
result = self.frame.xs('foo')
result2 = self.frame.loc['foo']
expected = self.frame.T['foo'].T
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result, result2)
result = self.ymd.xs((2000, 4))
expected = self.ymd.loc[2000, 4]
tm.assert_frame_equal(result, expected)
# ex from #1796
index = MultiIndex(levels=[['foo', 'bar'], ['one', 'two'], [-1, 1]],
labels=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1,
0, 1]])
df = DataFrame(np.random.randn(8, 4), index=index,
columns=list('abcd'))
result = df.xs(['foo', 'one'])
expected = df.loc['foo', 'one']
tm.assert_frame_equal(result, expected)
def test_xs_level(self):
result = self.frame.xs('two', level='second')
expected = self.frame[self.frame.index.get_level_values(1) == 'two']
expected.index = expected.index.droplevel(1)
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_tuples([('x', 'y', 'z'), ('a', 'b', 'c'), (
'p', 'q', 'r')])
df = DataFrame(np.random.randn(3, 5), index=index)
result = df.xs('c', level=2)
expected = df[1:2]
expected.index = expected.index.droplevel(2)
tm.assert_frame_equal(result, expected)
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
pytest.raises(com.SettingWithCopyError, f, result)
def test_xs_level_multiple(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs(('a', 4), level=['one', 'four'])
expected = df.xs('a').xs(4, level='four')
tm.assert_frame_equal(result, expected)
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
# setting this will give a SettingWithCopyError
# as we are trying to write a view
def f(x):
x[:] = 10
pytest.raises(com.SettingWithCopyError, f, result)
# GH2107
dates = lrange(20111201, 20111205)
ids = 'abcde'
idx = MultiIndex.from_tuples([x for x in cart_product(dates, ids)])
idx.names = ['date', 'secid']
df = DataFrame(np.random.randn(len(idx), 3), idx, ['X', 'Y', 'Z'])
rs = df.xs(20111201, level='date')
xp = df.loc[20111201, :]
tm.assert_frame_equal(rs, xp)
def test_xs_level0(self):
from pandas import read_table
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
df = read_table(StringIO(text), sep=r'\s+', engine='python')
result = df.xs('a', level=0)
expected = df.xs('a')
assert len(result) == 2
tm.assert_frame_equal(result, expected)
def test_xs_level_series(self):
s = self.frame['A']
result = s[:, 'two']
expected = self.frame.xs('two', level=1)['A']
tm.assert_series_equal(result, expected)
s = self.ymd['A']
result = s[2000, 5]
expected = self.ymd.loc[2000, 5]['A']
tm.assert_series_equal(result, expected)
# not implementing this for now
pytest.raises(TypeError, s.__getitem__, (2000, slice(3, 4)))
# result = s[2000, 3:4]
# lv =s.index.get_level_values(1)
# expected = s[(lv == 3) | (lv == 4)]
# expected.index = expected.index.droplevel(0)
# tm.assert_series_equal(result, expected)
# can do this though
def test_get_loc_single_level(self):
s = Series(np.random.randn(len(self.single_level)),
index=self.single_level)
for k in self.single_level.values:
s[k]
def test_getitem_toplevel(self):
df = self.frame.T
result = df['foo']
expected = df.reindex(columns=df.columns[:3])
expected.columns = expected.columns.droplevel(0)
tm.assert_frame_equal(result, expected)
result = df['bar']
result2 = df.loc[:, 'bar']
expected = df.reindex(columns=df.columns[3:5])
expected.columns = expected.columns.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result, result2)
def test_getitem_setitem_slice_integers(self):
index = MultiIndex(levels=[[0, 1, 2], [0, 2]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
frame = DataFrame(np.random.randn(len(index), 4), index=index,
columns=['a', 'b', 'c', 'd'])
res = frame.loc[1:2]
exp = frame.reindex(frame.index[2:])
tm.assert_frame_equal(res, exp)
frame.loc[1:2] = 7
assert (frame.loc[1:2] == 7).values.all()
series = Series(np.random.randn(len(index)), index=index)
res = series.loc[1:2]
exp = series.reindex(series.index[2:])
tm.assert_series_equal(res, exp)
series.loc[1:2] = 7
assert (series.loc[1:2] == 7).values.all()
def test_getitem_int(self):
levels = [[0, 1], [0, 1, 2]]
labels = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
index = MultiIndex(levels=levels, labels=labels)
frame = DataFrame(np.random.randn(6, 2), index=index)
result = frame.loc[1]
expected = frame[-3:]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
# raises exception
pytest.raises(KeyError, frame.loc.__getitem__, 3)
# however this will work
result = self.frame.iloc[2]
expected = self.frame.xs(self.frame.index[2])
tm.assert_series_equal(result, expected)
def test_getitem_partial(self):
ymd = self.ymd.T
result = ymd[2000, 2]
expected = ymd.reindex(columns=ymd.columns[ymd.columns.labels[1] == 1])
expected.columns = expected.columns.droplevel(0).droplevel(0)
tm.assert_frame_equal(result, expected)
def test_setitem_change_dtype(self):
dft = self.frame.T
s = dft['foo', 'two']
dft['foo', 'two'] = s > s.median()
tm.assert_series_equal(dft['foo', 'two'], s > s.median())
# assert isinstance(dft._data.blocks[1].items, MultiIndex)
reindexed = dft.reindex(columns=[('foo', 'two')])
tm.assert_series_equal(reindexed['foo', 'two'], s > s.median())
def test_frame_setitem_ix(self):
self.frame.loc[('bar', 'two'), 'B'] = 5
assert self.frame.loc[('bar', 'two'), 'B'] == 5
# with integer labels
df = self.frame.copy()
df.columns = lrange(3)
df.loc[('bar', 'two'), 1] = 7
assert df.loc[('bar', 'two'), 1] == 7
with catch_warnings(record=True):
df = self.frame.copy()
df.columns = lrange(3)
df.ix[('bar', 'two'), 1] = 7
assert df.loc[('bar', 'two'), 1] == 7
def test_fancy_slice_partial(self):
result = self.frame.loc['bar':'baz']
expected = self.frame[3:7]
tm.assert_frame_equal(result, expected)
result = self.ymd.loc[(2000, 2):(2000, 4)]
lev = self.ymd.index.labels[1]
expected = self.ymd[(lev >= 1) & (lev <= 3)]
tm.assert_frame_equal(result, expected)
def test_getitem_partial_column_select(self):
idx = MultiIndex(labels=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
levels=[['a', 'b'], ['x', 'y'], ['p', 'q']])
df = DataFrame(np.random.rand(3, 2), index=idx)
result = df.loc[('a', 'y'), :]
expected = df.loc[('a', 'y')]
tm.assert_frame_equal(result, expected)
result = df.loc[('a', 'y'), [1, 0]]
expected = df.loc[('a', 'y')][[1, 0]]
tm.assert_frame_equal(result, expected)
with catch_warnings(record=True):
result = df.ix[('a', 'y'), [1, 0]]
tm.assert_frame_equal(result, expected)
pytest.raises(KeyError, df.loc.__getitem__,
(('a', 'foo'), slice(None, None)))
def test_delevel_infer_dtype(self):
tuples = [tuple
for tuple in cart_product(
['foo', 'bar'], [10, 20], [1.0, 1.1])]
index = MultiIndex.from_tuples(tuples, names=['prm0', 'prm1', 'prm2'])
df = DataFrame(np.random.randn(8, 3), columns=['A', 'B', 'C'],
index=index)
deleveled = df.reset_index()
assert is_integer_dtype(deleveled['prm1'])
assert is_float_dtype(deleveled['prm2'])
def test_reset_index_with_drop(self):
deleveled = self.ymd.reset_index(drop=True)
assert len(deleveled.columns) == len(self.ymd.columns)
deleveled = self.series.reset_index()
assert isinstance(deleveled, DataFrame)
assert len(deleveled.columns) == len(self.series.index.levels) + 1
deleveled = self.series.reset_index(drop=True)
assert isinstance(deleveled, Series)
def test_count_level(self):
def _check_counts(frame, axis=0):
index = frame._get_axis(axis)
for i in range(index.nlevels):
result = frame.count(axis=axis, level=i)
expected = frame.groupby(axis=axis, level=i).count()
expected = expected.reindex_like(result).astype('i8')
tm.assert_frame_equal(result, expected)
self.frame.iloc[1, [1, 2]] = np.nan
self.frame.iloc[7, [0, 1]] = np.nan
self.ymd.iloc[1, [1, 2]] = np.nan
self.ymd.iloc[7, [0, 1]] = np.nan
_check_counts(self.frame)
_check_counts(self.ymd)
_check_counts(self.frame.T, axis=1)
_check_counts(self.ymd.T, axis=1)
# can't call with level on regular DataFrame
df = tm.makeTimeDataFrame()
tm.assert_raises_regex(
TypeError, 'hierarchical', df.count, level=0)
self.frame['D'] = 'foo'
result = self.frame.count(level=0, numeric_only=True)
tm.assert_index_equal(result.columns,
pd.Index(['A', 'B', 'C'], name='exp'))
def test_count_level_series(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz'], ['one', 'two',
'three', 'four']],
labels=[[0, 0, 0, 2, 2], [2, 0, 1, 1, 2]])
s = Series(np.random.randn(len(index)), index=index)
result = s.count(level=0)
expected = s.groupby(level=0).count()
tm.assert_series_equal(
result.astype('f8'), expected.reindex(result.index).fillna(0))
result = s.count(level=1)
expected = s.groupby(level=1).count()
tm.assert_series_equal(
result.astype('f8'), expected.reindex(result.index).fillna(0))
def test_count_level_corner(self):
s = self.frame['A'][:0]
result = s.count(level=0)
expected = Series(0, index=s.index.levels[0], name='A')
tm.assert_series_equal(result, expected)
df = self.frame[:0]
result = df.count(level=0)
expected = DataFrame({}, index=s.index.levels[0],
columns=df.columns).fillna(0).astype(np.int64)
tm.assert_frame_equal(result, expected)
def test_get_level_number_out_of_bounds(self):
with tm.assert_raises_regex(IndexError, "Too many levels"):
self.frame.index._get_level_number(2)
with tm.assert_raises_regex(IndexError,
"not a valid level number"):
self.frame.index._get_level_number(-3)
def test_unstack(self):
# just check that it works for now
unstacked = self.ymd.unstack()
unstacked.unstack()
# test that ints work
self.ymd.astype(int).unstack()
# test that int32 work
self.ymd.astype(np.int32).unstack()
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples([(0, 'foo', 0), (0, 'bar', 0), (
1, 'baz', 1), (1, 'qux', 1)])
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how='all')
tm.assert_frame_equal(unstacked, expected)
def test_stack(self):
# regular roundtrip
unstacked = self.ymd.unstack()
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, self.ymd)
unlexsorted = self.ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), self.ymd)
# columns unsorted
unstacked = self.ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, self.ymd)
# more than 2 levels in the columns
unstacked = self.ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = self.ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = self.ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = self.ymd.stack().unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = self.ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = self.ymd.stack()
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = self.ymd.unstack(0).stack(-2)
expected = self.ymd.unstack(0).stack(0)
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
assert not left.index.is_unique
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(np.arange(12).reshape(4, 3),
index=list('abab'),
columns=['1st', '2nd', '3rd'])
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd', '3rd']],
labels=[np.tile(
np.arange(2).repeat(3), 2), np.tile(
np.arange(3), 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ['1st', '2nd', '1st']
mi = MultiIndex(levels=[['a', 'b'], ['1st', '2nd']], labels=[np.tile(
np.arange(2).repeat(3), 2), np.tile(
[0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ('a', 2), ('b', 1), ('a', 1), ('b', 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(levels=[['a', 'b'], [1, 2], ['1st', '2nd']],
labels=[np.tile(
np.arange(2).repeat(3), 2), np.repeat(
[1, 0, 1], [3, 6, 3]), np.tile(
[0, 1, 0], 4)])
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thur,Dinner,No,3.0,1
Thur,Lunch,No,117.32,44
Thur,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(['day', 'time', 'smoker'])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
df = df.sort_index(level=1, axis=1)
stacked = df.stack()
result = df['foo'].stack().sort_index()
tm.assert_series_equal(stacked['foo'], result, check_names=False)
assert result.name is None
assert stacked['bar'].dtype == np.float_
def test_unstack_bug(self):
df = DataFrame({'state': ['naive', 'naive', 'naive', 'activ', 'activ',
'activ'],
'exp': ['a', 'b', 'b', 'b', 'a', 'a'],
'barcode': [1, 2, 3, 4, 1, 3],
'v': ['hi', 'hi', 'bye', 'bye', 'bye', 'peace'],
'extra': np.arange(6.)})
result = df.groupby(['state', 'exp', 'barcode', 'v']).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
tm.assert_series_equal(
restacked, result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self):
unstacked = self.frame.unstack()
assert unstacked.index.name == 'first'
assert unstacked.columns.names == ['exp', 'second']
restacked = unstacked.stack()
assert restacked.index.names == self.frame.index.names
def test_unstack_level_name(self):
result = self.frame.unstack('second')
expected = self.frame.unstack(level=1)
tm.assert_frame_equal(result, expected)
def test_stack_level_name(self):
unstacked = self.frame.unstack('second')
result = unstacked.stack('exp')
expected = self.frame.unstack().stack(0)
tm.assert_frame_equal(result, expected)
result = self.frame.stack('exp')
expected = self.frame.stack()
tm.assert_series_equal(result, expected)
def test_stack_unstack_multiple(self):
unstacked = self.ymd.unstack(['year', 'month'])
expected = self.ymd.unstack('year').unstack('month')
tm.assert_frame_equal(unstacked, expected)
assert unstacked.columns.names == expected.columns.names
# series
s = self.ymd['A']
s_unstacked = s.unstack(['year', 'month'])
tm.assert_frame_equal(s_unstacked, expected['A'])
restacked = unstacked.stack(['year', 'month'])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sort_index(level=0)
tm.assert_frame_equal(restacked, self.ymd)
assert restacked.index.names == self.ymd.index.names
# GH #451
unstacked = self.ymd.unstack([1, 2])
expected = self.ymd.unstack(1).unstack(1).dropna(axis=1, how='all')
tm.assert_frame_equal(unstacked, expected)
unstacked = self.ymd.unstack([2, 1])
expected = self.ymd.unstack(2).unstack(1).dropna(axis=1, how='all')
tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns])
def test_stack_names_and_numbers(self):
unstacked = self.ymd.unstack(['year', 'month'])
# Can't use mixture of names and numbers to stack
with tm.assert_raises_regex(ValueError, "level should contain"):
unstacked.stack([0, 'month'])
def test_stack_multiple_out_of_bounds(self):
# nlevels == 3
unstacked = self.ymd.unstack(['year', 'month'])
with tm.assert_raises_regex(IndexError, "Too many levels"):
unstacked.stack([2, 3])
with tm.assert_raises_regex(IndexError,
"not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH 4342
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period')
idx2 = Index(['A', 'B'] * 3, name='str')
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
['2013-01', '2013-02', '2013-03'], freq='M', name='period')
expected = DataFrame({'A': [1, 3, 5], 'B': [2, 4, 6]}, index=e_idx,
columns=['A', 'B'])
expected.columns.name = 'str'
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(['2013-01', '2013-01', '2013-02', '2013-02',
'2013-03', '2013-03'], freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07'], freq='M', name='period2')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
['2013-01', '2013-02', '2013-03'], freq='M', name='period1')
e_cols = pd.PeriodIndex(['2013-07', '2013-08', '2013-09', '2013-10',
'2013-11', '2013-12'],
freq='M', name='period2')
expected = DataFrame([[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan]],
index=e_idx, columns=e_cols)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH 4342
idx1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-02', '2014-02',
'2014-01', '2014-01'],
freq='M', name='period1')
idx2 = pd.PeriodIndex(['2013-12', '2013-12', '2014-02', '2013-10',
'2013-10', '2014-02'],
freq='M', name='period2')
value = {'A': [1, 2, 3, 4, 5, 6], 'B': [6, 5, 4, 3, 2, 1]}
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(['2014-01', '2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(['2013-10', '2013-12', '2014-02', '2013-10',
'2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A A B B B'.split(), e_2])
expected = DataFrame([[5, 1, 6, 2, 6, 1], [4, 2, 3, 3, 5, 4]],
index=e_1, columns=e_cols)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
e_1 = pd.PeriodIndex(['2014-01', '2014-02', '2014-01',
'2014-02'], freq='M', name='period1')
e_2 = pd.PeriodIndex(
['2013-10', '2013-12', '2014-02'], freq='M', name='period2')
e_cols = pd.MultiIndex.from_arrays(['A A B B'.split(), e_1])
expected = DataFrame([[5, 4, 2, 3], [1, 2, 6, 5], [6, 3, 1, 4]],
index=e_2, columns=e_cols)
tm.assert_frame_equal(result3, expected)
def test_stack_multiple_bug(self):
""" bug when some uniques are not present in the data #3170"""
id_col = ([1] * 3) + ([2] * 3)
name = (['a'] * 3) + (['b'] * 3)
date = pd.to_datetime(['2013-01-03', '2013-01-04', '2013-01-05'] * 2)
var1 = np.random.randint(0, 100, 6)
df = DataFrame(dict(ID=id_col, NAME=name, DATE=date, VAR1=var1))
multi = df.set_index(['DATE', 'ID'])
multi.columns.name = 'Params'
unst = multi.unstack('ID')
down = unst.resample('W-THU').mean()
rs = down.stack('ID')
xp = unst.loc[:, ['VAR1']].resample('W-THU').mean().stack('ID')
xp.columns.name = 'Params'
tm.assert_frame_equal(rs, xp)
def test_stack_dropna(self):
# GH #3997
df = pd.DataFrame({'A': ['a1', 'a2'], 'B': ['b1', 'b2'], 'C': [1, 1]})
df = df.set_index(['A', 'B'])
stacked = df.unstack().stack(dropna=False)
assert len(stacked) > len(stacked.dropna())
stacked = df.unstack().stack(dropna=True)
tm.assert_frame_equal(stacked, stacked.dropna())
def test_unstack_multiple_hierarchical(self):
df = DataFrame(index=[[0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 1, 0, 1, 0, 1, 0, 1
]],
columns=[[0, 0, 1, 1], [0, 1, 0, 1]])
df.index.names = ['a', 'b', 'c']
df.columns.names = ['d', 'e']
# it works!
df.unstack(['b', 'c'])
def test_groupby_transform(self):
s = self.frame['A']
grouper = s.index.get_level_values(0)
grouped = s.groupby(grouper)
applied = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
result = applied.reindex(expected.index)
tm.assert_series_equal(result, expected, check_names=False)
def test_unstack_sparse_keyspace(self):
# memory problems with naive impl #2278
# Generate Long File & Test Pivot
NUM_ROWS = 1000
df = DataFrame({'A': np.random.randint(100, size=NUM_ROWS),
'B': np.random.randint(300, size=NUM_ROWS),
'C': np.random.randint(-7, 7, size=NUM_ROWS),
'D': np.random.randint(-19, 19, size=NUM_ROWS),
'E': np.random.randint(3000, size=NUM_ROWS),
'F': np.random.randn(NUM_ROWS)})
idf = df.set_index(['A', 'B', 'C', 'D', 'E'])
# it works! is sufficient
idf.unstack('E')
def test_unstack_unobserved_keys(self):
# related to #2278 refactoring
levels = [[0, 1], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
index = MultiIndex(levels, labels)
df = DataFrame(np.random.randn(4, 2), index=index)
result = df.unstack()
assert len(result.columns) == 4
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_order_with_unsorted_levels(self):
# GH 16323
def manual_compare_stacked(df, df_stacked, lev0, lev1):
assert all(df.loc[row, col] ==
df_stacked.loc[(row, col[lev0]), col[lev1]]
for row in df.index for col in df.columns)
# deep check for 1-row case
for width in [2, 3]:
levels_poss = itertools.product(
itertools.permutations([0, 1, 2], width),
repeat=2)
for levels in levels_poss:
columns = MultiIndex(levels=levels,
labels=[[0, 0, 1, 1],
[0, 1, 0, 1]])
df = DataFrame(columns=columns, data=[range(4)])
for stack_lev in range(2):
df_stacked = df.stack(stack_lev)
manual_compare_stacked(df, df_stacked,
stack_lev, 1 - stack_lev)
# check multi-row case
mi = MultiIndex(levels=[["A", "C", "B"], ["B", "A", "C"]],
labels=[np.repeat(range(3), 3), np.tile(range(3), 3)])
df = DataFrame(columns=mi, index=range(5),
data=np.arange(5 * len(mi)).reshape(5, -1))
manual_compare_stacked(df, df.stack(0), 0, 1)
def test_groupby_corner(self):
midx = MultiIndex(levels=[['foo'], ['bar'], ['baz']],
labels=[[0], [0], [0]],
names=['one', 'two', 'three'])
df = DataFrame([np.random.rand(4)], columns=['a', 'b', 'c', 'd'],
index=midx)
# should work
df.groupby(level='three')
def test_groupby_level_no_obs(self):
# #1697
midx = MultiIndex.from_tuples([('f1', 's1'), ('f1', 's2'), (
'f2', 's1'), ('f2', 's2'), ('f3', 's1'), ('f3', 's2')])
df = DataFrame(
[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]], columns=midx)
df1 = df.select(lambda u: u[0] in ['f2', 'f3'], axis=1)
grouped = df1.groupby(axis=1, level=0)
result = grouped.sum()
assert (result.columns == ['f2', 'f3']).all()
def test_join(self):
a = self.frame.loc[self.frame.index[:5], ['A']]
b = self.frame.loc[self.frame.index[2:], ['B', 'C']]
joined = a.join(b, how='outer').reindex(self.frame.index)
expected = self.frame.copy()
expected.values[np.isnan(joined.values)] = np.nan
assert not np.isnan(joined.values).all()
# TODO what should join do with names ?
tm.assert_frame_equal(joined, expected, check_names=False)
def test_swaplevel(self):
swapped = self.frame['A'].swaplevel()
swapped2 = self.frame['A'].swaplevel(0)
swapped3 = self.frame['A'].swaplevel(0, 1)
swapped4 = self.frame['A'].swaplevel('first', 'second')
assert not swapped.index.equals(self.frame.index)
tm.assert_series_equal(swapped, swapped2)
tm.assert_series_equal(swapped, swapped3)
tm.assert_series_equal(swapped, swapped4)
back = swapped.swaplevel()
back2 = swapped.swaplevel(0)
back3 = swapped.swaplevel(0, 1)
back4 = swapped.swaplevel('second', 'first')
assert back.index.equals(self.frame.index)
tm.assert_series_equal(back, back2)
tm.assert_series_equal(back, back3)
tm.assert_series_equal(back, back4)
ft = self.frame.T
swapped = ft.swaplevel('first', 'second', axis=1)
exp = self.frame.swaplevel('first', 'second').T
tm.assert_frame_equal(swapped, exp)
def test_swaplevel_panel(self):
with catch_warnings(record=True):
panel = Panel({'ItemA': self.frame, 'ItemB': self.frame * 2})
expected = panel.copy()
expected.major_axis = expected.major_axis.swaplevel(0, 1)
for result in (panel.swaplevel(axis='major'),
panel.swaplevel(0, axis='major'),
panel.swaplevel(0, 1, axis='major')):
tm.assert_panel_equal(result, expected)
def test_reorder_levels(self):
result = self.ymd.reorder_levels(['month', 'day', 'year'])
expected = self.ymd.swaplevel(0, 1).swaplevel(1, 2)
tm.assert_frame_equal(result, expected)
result = self.ymd['A'].reorder_levels(['month', 'day', 'year'])
expected = self.ymd['A'].swaplevel(0, 1).swaplevel(1, 2)
tm.assert_series_equal(result, expected)
result = self.ymd.T.reorder_levels(['month', 'day', 'year'], axis=1)
expected = self.ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
tm.assert_frame_equal(result, expected)
with tm.assert_raises_regex(TypeError, 'hierarchical axis'):
self.ymd.reorder_levels([1, 2], axis=1)
with tm.assert_raises_regex(IndexError, 'Too many levels'):
self.ymd.index.reorder_levels([1, 2, 3])
def test_insert_index(self):
df = self.ymd[:5].T
df[2000, 1, 10] = df[2000, 1, 7]
assert isinstance(df.columns, MultiIndex)
assert (df[2000, 1, 10] == df[2000, 1, 7]).all()
def test_alignment(self):
x = Series(data=[1, 2, 3], index=MultiIndex.from_tuples([("A", 1), (
"A", 2), ("B", 3)]))
y = Series(data=[4, 5, 6], index=MultiIndex.from_tuples([("Z", 1), (
"Z", 2), ("B", 3)]))
res = x - y
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
tm.assert_series_equal(res, exp)
# hit non-monotonic code path
res = x[::-1] - y[::-1]
exp_index = x.index.union(y.index)
exp = x.reindex(exp_index) - y.reindex(exp_index)
tm.assert_series_equal(res, exp)
def test_frame_getitem_view(self):
df = self.frame.T.copy()
# this works because we are modifying the underlying array
# really a no-no
df['foo'].values[:] = 0
assert (df['foo'].values == 0).all()
# but not if it's mixed-type
df['foo', 'four'] = 'foo'
df = df.sort_index(level=0, axis=1)
# this will work, but will raise/warn as it's a chained assignment
def f():
df['foo']['one'] = 2
return df
pytest.raises(com.SettingWithCopyError, f)
try:
df = f()
except:
pass
assert (df['foo', 'one'] == 0).all()
def test_count(self):
frame = self.frame.copy()
frame.index.names = ['a', 'b']
result = frame.count(level='b')
expect = self.frame.count(level=1)
tm.assert_frame_equal(result, expect, check_names=False)
result = frame.count(level='a')
expect = self.frame.count(level=0)
tm.assert_frame_equal(result, expect, check_names=False)
series = self.series.copy()
series.index.names = ['a', 'b']
result = series.count(level='b')
expect = self.series.count(level=1)
tm.assert_series_equal(result, expect, check_names=False)
assert result.index.name == 'b'
result = series.count(level='a')
expect = self.series.count(level=0)
tm.assert_series_equal(result, expect, check_names=False)
assert result.index.name == 'a'
pytest.raises(KeyError, series.count, 'x')
pytest.raises(KeyError, frame.count, level='x')
AGG_FUNCTIONS = ['sum', 'prod', 'min', 'max', 'median', 'mean', 'skew',
'mad', 'std', 'var', 'sem']
def test_series_group_min_max(self):
for op, level, skipna in cart_product(self.AGG_FUNCTIONS, lrange(2),
[False, True]):
grouped = self.series.groupby(level=level)
aggf = lambda x: getattr(x, op)(skipna=skipna)
# skipna=True
leftside = grouped.agg(aggf)
rightside = getattr(self.series, op)(level=level, skipna=skipna)
tm.assert_series_equal(leftside, rightside)
def test_frame_group_ops(self):
self.frame.iloc[1, [1, 2]] = np.nan
self.frame.iloc[7, [0, 1]] = np.nan
for op, level, axis, skipna in cart_product(self.AGG_FUNCTIONS,
lrange(2), lrange(2),
[False, True]):
if axis == 0:
frame = self.frame
else:
frame = self.frame.T
grouped = frame.groupby(level=level, axis=axis)
pieces = []
def aggf(x):
pieces.append(x)
return getattr(x, op)(skipna=skipna, axis=axis)
leftside = grouped.agg(aggf)
rightside = getattr(frame, op)(level=level, axis=axis,
skipna=skipna)
# for good measure, groupby detail
level_index = frame._get_axis(axis).levels[level]
tm.assert_index_equal(leftside._get_axis(axis), level_index)
tm.assert_index_equal(rightside._get_axis(axis), level_index)
tm.assert_frame_equal(leftside, rightside)
def test_stat_op_corner(self):
obj = Series([10.0], index=MultiIndex.from_tuples([(2, 3)]))
result = obj.sum(level=0)
expected = Series([10.0], index=[2])
tm.assert_series_equal(result, expected)
def test_frame_any_all_group(self):
df = DataFrame(
{'data': [False, False, True, False, True, False, True]},
index=[
['one', 'one', 'two', 'one', 'two', 'two', 'two'],
[0, 1, 0, 2, 1, 2, 3]])
result = df.any(level=0)
ex = DataFrame({'data': [False, True]}, index=['one', 'two'])
tm.assert_frame_equal(result, ex)
result = df.all(level=0)
ex = DataFrame({'data': [False, False]}, index=['one', 'two'])
tm.assert_frame_equal(result, ex)
def test_std_var_pass_ddof(self):
index = MultiIndex.from_arrays([np.arange(5).repeat(10), np.tile(
np.arange(10), 5)])
df = DataFrame(np.random.randn(len(index), 5), index=index)
for meth in ['var', 'std']:
ddof = 4
alt = lambda x: getattr(x, meth)(ddof=ddof)
result = getattr(df[0], meth)(level=0, ddof=ddof)
expected = df[0].groupby(level=0).agg(alt)
tm.assert_series_equal(result, expected)
result = getattr(df, meth)(level=0, ddof=ddof)
expected = df.groupby(level=0).agg(alt)
tm.assert_frame_equal(result, expected)
def test_frame_series_agg_multiple_levels(self):
result = self.ymd.sum(level=['year', 'month'])
expected = self.ymd.groupby(level=['year', 'month']).sum()
tm.assert_frame_equal(result, expected)
result = self.ymd['A'].sum(level=['year', 'month'])
expected = self.ymd['A'].groupby(level=['year', 'month']).sum()
tm.assert_series_equal(result, expected)
def test_groupby_multilevel(self):
result = self.ymd.groupby(level=[0, 1]).mean()
k1 = self.ymd.index.get_level_values(0)
k2 = self.ymd.index.get_level_values(1)
expected = self.ymd.groupby([k1, k2]).mean()
# TODO groupby with level_values drops names
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.names == self.ymd.index.names[:2]
result2 = self.ymd.groupby(level=self.ymd.index.names[:2]).mean()
tm.assert_frame_equal(result, result2)
def test_groupby_multilevel_with_transform(self):
pass
def test_multilevel_consolidate(self):
index = MultiIndex.from_tuples([('foo', 'one'), ('foo', 'two'), (
'bar', 'one'), ('bar', 'two')])
df = DataFrame(np.random.randn(4, 4), index=index, columns=index)
df['Totals', ''] = df.sum(1)
df = df._consolidate()
def test_ix_preserve_names(self):
result = self.ymd.loc[2000]
result2 = self.ymd['A'].loc[2000]
assert result.index.names == self.ymd.index.names[1:]
assert result2.index.names == self.ymd.index.names[1:]
result = self.ymd.loc[2000, 2]
result2 = self.ymd['A'].loc[2000, 2]
assert result.index.name == self.ymd.index.names[2]
assert result2.index.name == self.ymd.index.names[2]
def test_partial_set(self):
# GH #397
df = self.ymd.copy()
exp = self.ymd.copy()
df.loc[2000, 4] = 0
exp.loc[2000, 4].values[:] = 0
tm.assert_frame_equal(df, exp)
df['A'].loc[2000, 4] = 1
exp['A'].loc[2000, 4].values[:] = 1
tm.assert_frame_equal(df, exp)
df.loc[2000] = 5
exp.loc[2000].values[:] = 5
tm.assert_frame_equal(df, exp)
# this works...for now
df['A'].iloc[14] = 5
assert df['A'][14] == 5
def test_unstack_preserve_types(self):
# GH #403
self.ymd['E'] = 'foo'
self.ymd['F'] = 2
unstacked = self.ymd.unstack('month')
assert unstacked['A', 1].dtype == np.float64
assert unstacked['E', 1].dtype == np.object_
assert unstacked['F', 1].dtype == np.float64
def test_unstack_group_index_overflow(self):
labels = np.tile(np.arange(500), 2)
level = np.arange(500)
index = MultiIndex(levels=[level] * 8 + [[0, 1]],
labels=[labels] * 8 + [np.arange(2).repeat(500)])
s = Series(np.arange(1000), index=index)
result = s.unstack()
assert result.shape == (500, 2)
# test roundtrip
stacked = result.stack()
tm.assert_series_equal(s, stacked.reindex(s.index))
# put it at beginning
index = MultiIndex(levels=[[0, 1]] + [level] * 8,
labels=[np.arange(2).repeat(500)] + [labels] * 8)
s = Series(np.arange(1000), index=index)
result = s.unstack(0)
assert result.shape == (500, 2)
# put it in middle
index = MultiIndex(levels=[level] * 4 + [[0, 1]] + [level] * 4,
labels=([labels] * 4 + [np.arange(2).repeat(500)] +
[labels] * 4))
s = Series(np.arange(1000), index=index)
result = s.unstack(4)
assert result.shape == (500, 2)
def test_getitem_lowerdim_corner(self):
pytest.raises(KeyError, self.frame.loc.__getitem__,
(('bar', 'three'), 'B'))
# in theory should be inserting in a sorted space????
self.frame.loc[('bar', 'three'), 'B'] = 0
assert self.frame.sort_index().loc[('bar', 'three'), 'B'] == 0
# ---------------------------------------------------------------------
# AMBIGUOUS CASES!
def test_partial_ix_missing(self):
pytest.skip("skipping for now")
result = self.ymd.loc[2000, 0]
expected = self.ymd.loc[2000]['A']
tm.assert_series_equal(result, expected)
# need to put in some work here
# self.ymd.loc[2000, 0] = 0
# assert (self.ymd.loc[2000]['A'] == 0).all()
# Pretty sure the second (and maybe even the first) is already wrong.
pytest.raises(Exception, self.ymd.loc.__getitem__, (2000, 6))
pytest.raises(Exception, self.ymd.loc.__getitem__, (2000, 6), 0)
# ---------------------------------------------------------------------
def test_to_html(self):
self.ymd.columns.name = 'foo'
self.ymd.to_html()
self.ymd.T.to_html()
def test_level_with_tuples(self):
index = MultiIndex(levels=[[('foo', 'bar', 0), ('foo', 'baz', 0), (
'foo', 'qux', 0)], [0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar', 0)]
result2 = series.loc[('foo', 'bar', 0)]
expected = series[:2]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
pytest.raises(KeyError, series.__getitem__, (('foo', 'bar', 0), 2))
result = frame.loc[('foo', 'bar', 0)]
result2 = frame.xs(('foo', 'bar', 0))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
index = MultiIndex(levels=[[('foo', 'bar'), ('foo', 'baz'), (
'foo', 'qux')], [0, 1]],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]])
series = Series(np.random.randn(6), index=index)
frame = DataFrame(np.random.randn(6, 4), index=index)
result = series[('foo', 'bar')]
result2 = series.loc[('foo', 'bar')]
expected = series[:2]
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
result = frame.loc[('foo', 'bar')]
result2 = frame.xs(('foo', 'bar'))
expected = frame[:2]
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_int_series_slicing(self):
s = self.ymd['A']
result = s[5:]
expected = s.reindex(s.index[5:])
tm.assert_series_equal(result, expected)
exp = self.ymd['A'].copy()
s[5:] = 0
exp.values[5:] = 0
tm.assert_numpy_array_equal(s.values, exp.values)
result = self.ymd[5:]
expected = self.ymd.reindex(s.index[5:])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('unicode_strings', [True, False])
def test_mixed_depth_get(self, unicode_strings):
# If unicode_strings is True, the column labels in dataframe
# construction will use unicode strings in Python 2 (pull request
# #17099).
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
if unicode_strings:
arrays = [[u(s) for s in arr] for arr in arrays]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(4, 6), columns=index)
result = df['a']
expected = df['a', '', ''].rename('a')
tm.assert_series_equal(result, expected)
result = df['routine1', 'result1']
expected = df['routine1', 'result1', '']
expected = expected.rename(('routine1', 'result1'))
tm.assert_series_equal(result, expected)
def test_mixed_depth_insert(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.copy()
expected = df.copy()
result['b'] = [1, 2, 3, 4]
expected['b', '', ''] = [1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
def test_mixed_depth_drop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
result = df.drop('a', axis=1)
expected = df.drop([('a', '', '')], axis=1)
tm.assert_frame_equal(expected, result)
result = df.drop(['top'], axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
expected = expected.drop([('top', 'OD', 'wy')], axis=1)
tm.assert_frame_equal(expected, result)
result = df.drop(('top', 'OD', 'wx'), axis=1)
expected = df.drop([('top', 'OD', 'wx')], axis=1)
tm.assert_frame_equal(expected, result)
expected = df.drop([('top', 'OD', 'wy')], axis=1)
expected = df.drop('top', axis=1)
result = df.drop('result1', level=1, axis=1)
expected = df.drop([('routine1', 'result1', ''),
('routine2', 'result1', '')], axis=1)
tm.assert_frame_equal(expected, result)
def test_drop_nonunique(self):
df = DataFrame([["x-a", "x", "a", 1.5], ["x-a", "x", "a", 1.2],
["z-c", "z", "c", 3.1], ["x-a", "x", "a", 4.1],
["x-b", "x", "b", 5.1], ["x-b", "x", "b", 4.1],
["x-b", "x", "b", 2.2],
["y-a", "y", "a", 1.2], ["z-b", "z", "b", 2.1]],
columns=["var1", "var2", "var3", "var4"])
grp_size = df.groupby("var1").size()
drop_idx = grp_size.loc[grp_size == 1]
idf = df.set_index(["var1", "var2", "var3"])
# it works! #2101
result = idf.drop(drop_idx.index, level=0).reset_index()
expected = df[-df.var1.isin(drop_idx.index)]
result.index = expected.index
tm.assert_frame_equal(result, expected)
def test_mixed_depth_pop(self):
arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'],
['', 'OD', 'OD', 'result1', 'result2', 'result1'],
['', 'wx', 'wy', '', '', '']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(randn(4, 6), columns=index)
df1 = df.copy()
df2 = df.copy()
result = df1.pop('a')
expected = df2.pop(('a', '', ''))
tm.assert_series_equal(expected, result, check_names=False)
tm.assert_frame_equal(df1, df2)
assert result.name == 'a'
expected = df1['top']
df1 = df1.drop(['top'], axis=1)
result = df2.pop('top')
tm.assert_frame_equal(expected, result)
tm.assert_frame_equal(df1, df2)
def test_reindex_level_partial_selection(self):
result = self.frame.reindex(['foo', 'qux'], level=0)
expected = self.frame.iloc[[0, 1, 2, 7, 8, 9]]
tm.assert_frame_equal(result, expected)
result = self.frame.T.reindex_axis(['foo', 'qux'], axis=1, level=0)
tm.assert_frame_equal(result, expected.T)
result = self.frame.loc[['foo', 'qux']]
tm.assert_frame_equal(result, expected)
result = self.frame['A'].loc[['foo', 'qux']]
tm.assert_series_equal(result, expected['A'])
result = self.frame.T.loc[:, ['foo', 'qux']]
tm.assert_frame_equal(result, expected.T)
def test_setitem_multiple_partial(self):
expected = self.frame.copy()
result = self.frame.copy()
result.loc[['foo', 'bar']] = 0
expected.loc['foo'] = 0
expected.loc['bar'] = 0
tm.assert_frame_equal(result, expected)
expected = self.frame.copy()
result = self.frame.copy()
result.loc['foo':'bar'] = 0
expected.loc['foo'] = 0
expected.loc['bar'] = 0
tm.assert_frame_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.loc[['foo', 'bar']] = 0
expected.loc['foo'] = 0
expected.loc['bar'] = 0
tm.assert_series_equal(result, expected)
expected = self.frame['A'].copy()
result = self.frame['A'].copy()
result.loc['foo':'bar'] = 0
expected.loc['foo'] = 0
expected.loc['bar'] = 0
tm.assert_series_equal(result, expected)
def test_drop_level(self):
result = self.frame.drop(['bar', 'qux'], level='first')
expected = self.frame.iloc[[0, 1, 2, 5, 6]]
tm.assert_frame_equal(result, expected)
result = self.frame.drop(['two'], level='second')
expected = self.frame.iloc[[0, 2, 3, 6, 7, 9]]
tm.assert_frame_equal(result, expected)
result = self.frame.T.drop(['bar', 'qux'], axis=1, level='first')
expected = self.frame.iloc[[0, 1, 2, 5, 6]].T
tm.assert_frame_equal(result, expected)
result = self.frame.T.drop(['two'], axis=1, level='second')
expected = self.frame.iloc[[0, 2, 3, 6, 7, 9]].T
tm.assert_frame_equal(result, expected)
def test_drop_level_nonunique_datetime(self):
# GH 12701
idx = pd.Index([2, 3, 4, 4, 5], name='id')
idxdt = pd.to_datetime(['201603231400',
'201603231500',
'201603231600',
'201603231600',
'201603231700'])
df = DataFrame(np.arange(10).reshape(5, 2),
columns=list('ab'), index=idx)
df['tstamp'] = idxdt
df = df.set_index('tstamp', append=True)
ts = pd.Timestamp('201603231600')
assert not df.index.is_unique
result = df.drop(ts, level='tstamp')
expected = df.loc[idx != 4]
tm.assert_frame_equal(result, expected)
def test_drop_preserve_names(self):
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1],
[1, 2, 3, 1, 2, 3]],
names=['one', 'two'])
df = DataFrame(np.random.randn(6, 3), index=index)
result = df.drop([(0, 2)])
assert result.index.names == ('one', 'two')
def test_unicode_repr_issues(self):
levels = [Index([u('a/\u03c3'), u('b/\u03c3'), u('c/\u03c3')]),
Index([0, 1])]
labels = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, labels=labels)
repr(index.levels)
# NumPy bug
# repr(index.get_level_values(1))
def test_unicode_repr_level_names(self):
index = MultiIndex.from_tuples([(0, 0), (1, 1)],
names=[u('\u0394'), 'i1'])
s = Series(lrange(2), index=index)
df = DataFrame(np.random.randn(2, 4), index=index)
repr(s)
repr(df)
def test_dataframe_insert_column_all_na(self):
# GH #1534
mix = MultiIndex.from_tuples([('1a', '2a'), ('1a', '2b'), ('1a', '2c')
])
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
s = Series({(1, 1): 1, (1, 2): 2})
df['new'] = s
assert df['new'].isna().all()
def test_join_segfault(self):
# 1532
df1 = DataFrame({'a': [1, 1], 'b': [1, 2], 'x': [1, 2]})
df2 = DataFrame({'a': [2, 2], 'b': [1, 2], 'y': [1, 2]})
df1 = df1.set_index(['a', 'b'])
df2 = df2.set_index(['a', 'b'])
# it works!
for how in ['left', 'right', 'outer']:
df1.join(df2, how=how)
def test_set_column_scalar_with_ix(self):
subset = self.frame.index[[1, 4, 5]]
self.frame.loc[subset] = 99
assert (self.frame.loc[subset].values == 99).all()
col = self.frame['B']
col[subset] = 97
assert (self.frame.loc[subset, 'B'] == 97).all()
def test_frame_dict_constructor_empty_series(self):
s1 = Series([
1, 2, 3, 4
], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)]))
s2 = Series([
1, 2, 3, 4
], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)]))
s3 = Series()
# it works!
DataFrame({'foo': s1, 'bar': s2, 'baz': s3})
DataFrame.from_dict({'foo': s1, 'baz': s3, 'bar': s2})
def test_indexing_ambiguity_bug_1678(self):
columns = MultiIndex.from_tuples([('Ohio', 'Green'), ('Ohio', 'Red'), (
'Colorado', 'Green')])
index = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b', 2)
])
frame = DataFrame(np.arange(12).reshape((4, 3)), index=index,
columns=columns)
result = frame.iloc[:, 1]
exp = frame.loc[:, ('Ohio', 'Red')]
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_nonunique_assignment_1750(self):
df = DataFrame([[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]],
columns=list("ABCD"))
df = df.set_index(['A', 'B'])
ix = MultiIndex.from_tuples([(1, 1)])
df.loc[ix, "C"] = '_'
assert (df.xs((1, 1))['C'] == '_').all()
def test_indexing_over_hashtable_size_cutoff(self):
n = 10000
old_cutoff = _index._SIZE_CUTOFF
_index._SIZE_CUTOFF = 20000
s = Series(np.arange(n),
MultiIndex.from_arrays((["a"] * n, np.arange(n))))
# hai it works!
assert s[("a", 5)] == 5
assert s[("a", 6)] == 6
assert s[("a", 7)] == 7
_index._SIZE_CUTOFF = old_cutoff
def test_multiindex_na_repr(self):
# only an issue with long columns
from numpy import nan
df3 = DataFrame({
'A' * 30: {('A', 'A0006000', 'nuit'): 'A0006000'},
'B' * 30: {('A', 'A0006000', 'nuit'): nan},
'C' * 30: {('A', 'A0006000', 'nuit'): nan},
'D' * 30: {('A', 'A0006000', 'nuit'): nan},
'E' * 30: {('A', 'A0006000', 'nuit'): 'A'},
'F' * 30: {('A', 'A0006000', 'nuit'): nan},
})
idf = df3.set_index(['A' * 30, 'C' * 30])
repr(idf)
def test_assign_index_sequences(self):
# #2200
df = DataFrame({"a": [1, 2, 3],
"b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ("faz", "boo")
df.index = l
repr(df)
# this travels an improper code path
l[0] = ["faz", "boo"]
df.index = l
repr(df)
def test_tuples_have_na(self):
index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
labels=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0,
1, 2, 3]])
assert isna(index[4][0])
assert isna(index.values[4][0])
def test_duplicate_groupby_issues(self):
idx_tp = [('600809', '20061231'), ('600809', '20070331'),
('600809', '20070630'), ('600809', '20070331')]
dt = ['demo', 'demo', 'demo', 'demo']
idx = MultiIndex.from_tuples(idx_tp, names=['STK_ID', 'RPT_Date'])
s = Series(dt, index=idx)
result = s.groupby(s.index).first()
assert len(result) == 3
def test_duplicate_mi(self):
# GH 4516
df = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['bah', 'bam', 3.0, 3],
['bah', 'bam', 4.0, 4], ['foo', 'bar', 5.0, 5],
['bah', 'bam', 6.0, 6]],
columns=list('ABCD'))
df = df.set_index(['A', 'B'])
df = df.sort_index(level=0)
expected = DataFrame([['foo', 'bar', 1.0, 1], ['foo', 'bar', 2.0, 2],
['foo', 'bar', 5.0, 5]],
columns=list('ABCD')).set_index(['A', 'B'])
result = df.loc[('foo', 'bar')]
tm.assert_frame_equal(result, expected)
def test_duplicated_drop_duplicates(self):
# GH 4060
idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2, 3], [1, 1, 1, 1, 2, 2]))
expected = np.array(
[False, False, False, True, False, False], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(), expected)
expected = np.array([True, False, False, False, False, False])
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep='last'), expected)
expected = np.array([True, False, False, True, False, False])
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2]))
tm.assert_index_equal(idx.drop_duplicates(keep=False), expected)
def test_multiindex_set_index(self):
# segfault in #3308
d = {'t1': [2, 2.5, 3], 't2': [4, 5, 6]}
df = DataFrame(d)
tuples = [(0, 1), (0, 2), (1, 2)]
df['tuples'] = tuples
index = MultiIndex.from_tuples(df['tuples'])
# it works!
df.set_index(index)
def test_datetimeindex(self):
idx1 = pd.DatetimeIndex(
['2013-04-01 9:00', '2013-04-02 9:00', '2013-04-03 9:00'
] * 2, tz='Asia/Tokyo')
idx2 = pd.date_range('2010/01/01', periods=6, freq='M',
tz='US/Eastern')
idx = MultiIndex.from_arrays([idx1, idx2])
expected1 = pd.DatetimeIndex(['2013-04-01 9:00', '2013-04-02 9:00',
'2013-04-03 9:00'], tz='Asia/Tokyo')
tm.assert_index_equal(idx.levels[0], expected1)
tm.assert_index_equal(idx.levels[1], idx2)
# from datetime combos
# GH 7888
date1 = datetime.date.today()
date2 = datetime.datetime.today()
date3 = Timestamp.today()
for d1, d2 in itertools.product(
[date1, date2, date3], [date1, date2, date3]):
index = pd.MultiIndex.from_product([[d1], [d2]])
assert isinstance(index.levels[0], pd.DatetimeIndex)
assert isinstance(index.levels[1], pd.DatetimeIndex)
def test_constructor_with_tz(self):
index = pd.DatetimeIndex(['2013/01/01 09:00', '2013/01/02 09:00'],
name='dt1', tz='US/Pacific')
columns = pd.DatetimeIndex(['2014/01/01 09:00', '2014/01/02 09:00'],
name='dt2', tz='Asia/Tokyo')
result = MultiIndex.from_arrays([index, columns])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
result = MultiIndex.from_arrays([Series(index), Series(columns)])
tm.assert_index_equal(result.levels[0], index)
tm.assert_index_equal(result.levels[1], columns)
def test_set_index_datetime(self):
# GH 3950
df = pd.DataFrame(
{'label': ['a', 'a', 'a', 'b', 'b', 'b'],
'datetime': ['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
'value': range(6)})
df.index = pd.to_datetime(df.pop('datetime'), utc=True)
df.index = df.index.tz_localize('UTC').tz_convert('US/Pacific')
expected = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], name='datetime')
expected = expected.tz_localize('UTC').tz_convert('US/Pacific')
df = df.set_index('label', append=True)
tm.assert_index_equal(df.index.levels[0], expected)
tm.assert_index_equal(df.index.levels[1],
pd.Index(['a', 'b'], name='label'))
df = df.swaplevel(0, 1)
tm.assert_index_equal(df.index.levels[0],
pd.Index(['a', 'b'], name='label'))
tm.assert_index_equal(df.index.levels[1], expected)
df = DataFrame(np.random.random(6))
idx1 = pd.DatetimeIndex(['2011-07-19 07:00:00', '2011-07-19 08:00:00',
'2011-07-19 09:00:00', '2011-07-19 07:00:00',
'2011-07-19 08:00:00', '2011-07-19 09:00:00'],
tz='US/Eastern')
idx2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-01 09:00',
'2012-04-01 09:00', '2012-04-02 09:00',
'2012-04-02 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
idx3 = pd.date_range('2011-01-01 09:00', periods=6, tz='Asia/Tokyo')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.DatetimeIndex(['2011-07-19 07:00:00',
'2011-07-19 08:00:00',
'2011-07-19 09:00:00'], tz='US/Eastern')
expected2 = pd.DatetimeIndex(['2012-04-01 09:00', '2012-04-02 09:00'],
tz='US/Eastern')
tm.assert_index_equal(df.index.levels[0], expected1)
tm.assert_index_equal(df.index.levels[1], expected2)
tm.assert_index_equal(df.index.levels[2], idx3)
# GH 7092
tm.assert_index_equal(df.index.get_level_values(0), idx1)
tm.assert_index_equal(df.index.get_level_values(1), idx2)
tm.assert_index_equal(df.index.get_level_values(2), idx3)
def test_reset_index_datetime(self):
# GH 3950
for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:
idx1 = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz,
name='idx1')
idx2 = pd.Index(range(5), name='idx2', dtype='int64')
idx = pd.MultiIndex.from_arrays([idx1, idx2])
df = pd.DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: pd.Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
idx3 = pd.date_range('1/1/2012', periods=5, freq='MS',
tz='Europe/Paris', name='idx3')
idx = pd.MultiIndex.from_arrays([idx1, idx2, idx3])
df = pd.DataFrame(
{'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)
expected = pd.DataFrame({'idx1': [datetime.datetime(2011, 1, 1),
datetime.datetime(2011, 1, 2),
datetime.datetime(2011, 1, 3),
datetime.datetime(2011, 1, 4),
datetime.datetime(2011, 1, 5)],
'idx2': np.arange(5, dtype='int64'),
'idx3': [datetime.datetime(2012, 1, 1),
datetime.datetime(2012, 2, 1),
datetime.datetime(2012, 3, 1),
datetime.datetime(2012, 4, 1),
datetime.datetime(2012, 5, 1)],
'a': np.arange(5, dtype='int64'),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx1', 'idx2', 'idx3', 'a', 'b'])
expected['idx1'] = expected['idx1'].apply(
lambda d: pd.Timestamp(d, tz=tz))
expected['idx3'] = expected['idx3'].apply(
lambda d: pd.Timestamp(d, tz='Europe/Paris'))
tm.assert_frame_equal(df.reset_index(), expected)
# GH 7793
idx = pd.MultiIndex.from_product([['a', 'b'], pd.date_range(
'20130101', periods=3, tz=tz)])
df = pd.DataFrame(
np.arange(6, dtype='int64').reshape(
6, 1), columns=['a'], index=idx)
expected = pd.DataFrame({'level_0': 'a a a b b b'.split(),
'level_1': [
datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 2),
datetime.datetime(2013, 1, 3)] * 2,
'a': np.arange(6, dtype='int64')},
columns=['level_0', 'level_1', 'a'])
expected['level_1'] = expected['level_1'].apply(
lambda d: pd.Timestamp(d, freq='D', tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_reset_index_period(self):
# GH 7746
idx = pd.MultiIndex.from_product([pd.period_range('20130101',
periods=3, freq='M'),
['a', 'b', 'c']],
names=['month', 'feature'])
df = pd.DataFrame(np.arange(9, dtype='int64')
.reshape(-1, 1),
index=idx, columns=['a'])
expected = pd.DataFrame({
'month': ([pd.Period('2013-01', freq='M')] * 3 +
[pd.Period('2013-02', freq='M')] * 3 +
[pd.Period('2013-03', freq='M')] * 3),
'feature': ['a', 'b', 'c'] * 3,
'a': np.arange(9, dtype='int64')
}, columns=['month', 'feature', 'a'])
tm.assert_frame_equal(df.reset_index(), expected)
def test_reset_index_multiindex_columns(self):
levels = [['A', ''], ['B', 'b']]
df = pd.DataFrame([[0, 2], [1, 3]],
columns=pd.MultiIndex.from_tuples(levels))
result = df[['B']].rename_axis('A').reset_index()
tm.assert_frame_equal(result, df)
# gh-16120: already existing column
with tm.assert_raises_regex(ValueError,
("cannot insert \('A', ''\), "
"already exists")):
df.rename_axis('A').reset_index()
# gh-16164: multiindex (tuple) full key
result = df.set_index([('A', '')]).reset_index()
tm.assert_frame_equal(result, df)
# with additional (unnamed) index level
idx_col = pd.DataFrame([[0], [1]],
columns=pd.MultiIndex.from_tuples([('level_0',
'')]))
expected = pd.concat([idx_col, df[[('B', 'b'), ('A', '')]]], axis=1)
result = df.set_index([('B', 'b')], append=True).reset_index()
tm.assert_frame_equal(result, expected)
# with index name which is a too long tuple...
with tm.assert_raises_regex(ValueError,
("Item must have length equal to number "
"of levels.")):
df.rename_axis([('C', 'c', 'i')]).reset_index()
# or too short...
levels = [['A', 'a', ''], ['B', 'b', 'i']]
df2 = pd.DataFrame([[0, 2], [1, 3]],
columns=pd.MultiIndex.from_tuples(levels))
idx_col = pd.DataFrame([[0], [1]],
columns=pd.MultiIndex.from_tuples([('C',
'c',
'ii')]))
expected = pd.concat([idx_col, df2], axis=1)
result = df2.rename_axis([('C', 'c')]).reset_index(col_fill='ii')
tm.assert_frame_equal(result, expected)
# ... which is incompatible with col_fill=None
with tm.assert_raises_regex(ValueError,
("col_fill=None is incompatible with "
"incomplete column name \('C', 'c'\)")):
df2.rename_axis([('C', 'c')]).reset_index(col_fill=None)
# with col_level != 0
result = df2.rename_axis([('c', 'ii')]).reset_index(col_level=1,
col_fill='C')
tm.assert_frame_equal(result, expected)
def test_set_index_period(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = pd.period_range('2011-01-01', periods=3, freq='M')
idx1 = idx1.append(idx1)
idx2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
idx2 = idx2.append(idx2).append(idx2)
idx3 = pd.period_range('2005', periods=6, freq='A')
df = df.set_index(idx1)
df = df.set_index(idx2, append=True)
df = df.set_index(idx3, append=True)
expected1 = pd.period_range('2011-01-01', periods=3, freq='M')
expected2 = pd.period_range('2013-01-01 09:00', periods=2, freq='H')
tm.assert_index_equal(df.index.levels[0], expected1)
tm.assert_index_equal(df.index.levels[1], expected2)
tm.assert_index_equal(df.index.levels[2], idx3)
tm.assert_index_equal(df.index.get_level_values(0), idx1)
tm.assert_index_equal(df.index.get_level_values(1), idx2)
tm.assert_index_equal(df.index.get_level_values(2), idx3)
def test_repeat(self):
# GH 9361
# fixed by # GH 7891
m_idx = pd.MultiIndex.from_tuples([(1, 2), (3, 4), (5, 6), (7, 8)])
data = ['a', 'b', 'c', 'd']
m_df = pd.Series(data, index=m_idx)
assert m_df.repeat(3).shape == (3 * len(data), )
def test_iloc_mi(self):
# GH 13797
# Test if iloc can handle integer locations in MultiIndexed DataFrame
data = [
['str00', 'str01'],
['str10', 'str11'],
['str20', 'srt21'],
['str30', 'str31'],
['str40', 'str41']
]
mi = pd.MultiIndex.from_tuples(
[('CC', 'A'),
('CC', 'B'),
('CC', 'B'),
('BB', 'a'),
('BB', 'b')
])
expected = pd.DataFrame(data)
df_mi = pd.DataFrame(data, index=mi)
result = pd.DataFrame([[df_mi.iloc[r, c] for c in range(2)]
for r in range(5)])
tm.assert_frame_equal(result, expected)
class TestSorted(Base):
""" everthing you wanted to test about sorting """
def test_sort_index_preserve_levels(self):
result = self.frame.sort_index()
assert result.index.names == self.frame.index.names
def test_sorting_repr_8017(self):
np.random.seed(0)
data = np.random.randn(3, 4)
for gen, extra in [([1., 3., 2., 5.], 4.), ([1, 3, 2, 5], 4),
([Timestamp('20130101'), Timestamp('20130103'),
Timestamp('20130102'), Timestamp('20130105')],
Timestamp('20130104')),
(['1one', '3one', '2one', '5one'], '4one')]:
columns = MultiIndex.from_tuples([('red', i) for i in gen])
df = DataFrame(data, index=list('def'), columns=columns)
df2 = pd.concat([df,
DataFrame('world', index=list('def'),
columns=MultiIndex.from_tuples(
[('red', extra)]))], axis=1)
# check that the repr is good
# make sure that we have a correct sparsified repr
            # e.g. only 1 header of 'red'
assert str(df2).splitlines()[0].split() == ['red']
# GH 8017
# sorting fails after columns added
# construct single-dtype then sort
result = df.copy().sort_index(axis=1)
expected = df.iloc[:, [0, 2, 1, 3]]
tm.assert_frame_equal(result, expected)
result = df2.sort_index(axis=1)
expected = df2.iloc[:, [0, 2, 1, 4, 3]]
tm.assert_frame_equal(result, expected)
# setitem then sort
result = df.copy()
result[('red', extra)] = 'world'
result = result.sort_index(axis=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level(self):
df = self.frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = self.frame['A'].sort_index(level=0)
# preserve names
assert a_sorted.index.names == self.frame.index.names
# inplace
rs = self.frame.copy()
rs.sort_index(level=0, inplace=True)
tm.assert_frame_equal(rs, self.frame.sort_index(level=0))
def test_sort_index_level_large_cardinality(self):
# #2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sort_index(level=0)
assert result.index.lexsort_depth == 3
# #2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sort_index(level=0)
assert (result.dtypes.values == df.dtypes.values).all()
assert result.index.lexsort_depth == 3
def test_sort_index_level_by_name(self):
self.frame.index.names = ['first', 'second']
result = self.frame.sort_index(level='second')
expected = self.frame.sort_index(level=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level_mixed(self):
sorted_before = self.frame.sort_index(level=1)
df = self.frame.copy()
df['foo'] = 'bar'
sorted_after = df.sort_index(level=1)
tm.assert_frame_equal(sorted_before,
sorted_after.drop(['foo'], axis=1))
dft = self.frame.T
sorted_before = dft.sort_index(level=1, axis=1)
dft['foo', 'three'] = 'bar'
sorted_after = dft.sort_index(level=1, axis=1)
tm.assert_frame_equal(sorted_before.drop([('foo', 'three')], axis=1),
sorted_after.drop([('foo', 'three')], axis=1))
def test_is_lexsorted(self):
levels = [[0, 1], [0, 1, 2]]
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
assert index.is_lexsorted()
index = MultiIndex(levels=levels,
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]])
assert not index.is_lexsorted()
index = MultiIndex(levels=levels,
labels=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]])
assert not index.is_lexsorted()
assert index.lexsort_depth == 0
def test_getitem_multilevel_index_tuple_not_sorted(self):
index_columns = list("abc")
df = DataFrame([[0, 1, 0, "x"], [0, 0, 1, "y"]],
columns=index_columns + ["data"])
df = df.set_index(index_columns)
query_index = df.index[:1]
rs = df.loc[query_index, "data"]
xp_idx = MultiIndex.from_tuples([(0, 1, 0)], names=['a', 'b', 'c'])
xp = Series(['x'], index=xp_idx, name='data')
tm.assert_series_equal(rs, xp)
def test_getitem_slice_not_sorted(self):
df = self.frame.sort_index(level=1).T
# buglet with int typechecking
result = df.iloc[:, :np.int32(3)]
expected = df.reindex(columns=df.columns[:3])
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted2(self):
# 13431
df = DataFrame({'col1': ['b', 'd', 'b', 'a'],
'col2': [3, 1, 1, 2],
'data': ['one', 'two', 'three', 'four']})
df2 = df.set_index(['col1', 'col2'])
df2_original = df2.copy()
df2.index.set_levels(['b', 'd', 'a'], level='col1', inplace=True)
df2.index.set_labels([0, 1, 0, 2], level='col1', inplace=True)
assert not df2.index.is_lexsorted()
assert not df2.index.is_monotonic
assert df2_original.index.equals(df2.index)
expected = df2.sort_index()
assert expected.index.is_lexsorted()
assert expected.index.is_monotonic
result = df2.sort_index(level=0)
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_frame_getitem_not_sorted(self):
df = self.frame.T
df['foo', 'four'] = 'foo'
arrays = [np.array(x) for x in zip(*df.columns.values)]
result = df['foo']
result2 = df.loc[:, 'foo']
expected = df.reindex(columns=df.columns[arrays[0] == 'foo'])
expected.columns = expected.columns.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
df = df.T
result = df.xs('foo')
result2 = df.loc['foo']
expected = df.reindex(df.index[arrays[0] == 'foo'])
expected.index = expected.index.droplevel(0)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_series_getitem_not_sorted(self):
arrays = [['bar', 'bar', 'baz', 'baz', 'qux', 'qux', 'foo', 'foo'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
tuples = lzip(*arrays)
index = MultiIndex.from_tuples(tuples)
s = Series(randn(8), index=index)
arrays = [np.array(x) for x in zip(*index.values)]
result = s['qux']
result2 = s.loc['qux']
expected = s[arrays[0] == 'qux']
expected.index = expected.index.droplevel(0)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
def test_sort_index_and_reconstruction(self):
# 15622
# lexsortedness should be identical
        # across MultiIndex construction methods
df = DataFrame([[1, 1], [2, 2]], index=list('ab'))
expected = DataFrame([[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples([(0.5, 'a'),
(0.5, 'b'),
(0.8, 'a'),
(0.8, 'b')]))
assert expected.index.is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list('ab')]))
result = result.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex(levels=[[0.5, 0.8], ['a', 'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
result = result.sort_index()
assert result.index.is_lexsorted()
tm.assert_frame_equal(result, expected)
concatted = pd.concat([df, df], keys=[0.8, 0.5])
result = concatted.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# 14015
df = DataFrame([[1, 2], [6, 7]],
columns=MultiIndex.from_tuples(
[(0, '20160811 12:00:00'),
(0, '20160809 12:00:00')],
names=['l1', 'Date']))
df.columns.set_levels(pd.to_datetime(df.columns.levels[1]),
level=1,
inplace=True)
assert not df.columns.is_lexsorted()
assert not df.columns.is_monotonic
result = df.sort_index(axis=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
result = df.sort_index(axis=1, level=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame({'value': [1, 2, 3, 4]},
index=MultiIndex(
levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame({'value': [2, 1, 4, 3]},
index=MultiIndex(
levels=[['a', 'b'], ['aa', 'bb']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]]))
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_reorder_on_ops(self):
# 15687
df = pd.DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[['a', 'b'],
['big', 'small'],
['red', 'blu']],
names=['letter', 'size', 'color']),
columns=['near', 'far'])
df = df.sort_index()
def my_func(group):
group.index = ['newz', 'newa']
return group
result = df.groupby(level=['letter', 'size']).apply(
my_func).sort_index()
expected = MultiIndex.from_product(
[['a', 'b'],
['big', 'small'],
['newa', 'newz']],
names=['letter', 'size', None])
tm.assert_index_equal(result.index, expected)
def test_sort_non_lexsorted(self):
# degenerate case where we sort but don't
# have a satisfying result :<
# GH 15797
idx = MultiIndex([['A', 'B', 'C'],
['c', 'b', 'a']],
[[0, 1, 2, 0, 1, 2],
[0, 2, 1, 1, 0, 2]])
df = DataFrame({'col': range(len(idx))},
index=idx,
dtype='int64')
assert df.index.is_lexsorted() is False
assert df.index.is_monotonic is False
sorted = df.sort_index()
assert sorted.index.is_lexsorted() is True
assert sorted.index.is_monotonic is True
expected = DataFrame(
{'col': [1, 4, 5, 2]},
index=MultiIndex.from_tuples([('B', 'a'), ('B', 'c'),
('C', 'a'), ('C', 'b')]),
dtype='int64')
result = sorted.loc[pd.IndexSlice['B':'C', 'a':'c'], :]
tm.assert_frame_equal(result, expected)
def test_sort_index_nan(self):
# GH 14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4),
index=mi, columns=list('ABCD'))
s = Series(np.arange(4), index=mi)
df2 = DataFrame({
'date': pd.to_datetime([
'20121002', '20121007', '20130130', '20130202', '20130305',
'20121002', '20121207', '20130130', '20130202', '20130305',
'20130202', '20130305'
]),
'user_id': [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
'whole_cost': [1790, np.nan, 280, 259, np.nan, 623, 90, 312,
np.nan, 301, 359, 801],
'cost': [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12]
}).set_index(['date', 'user_id'])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position='last')
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position='first')
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position='last')
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position='first')
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_ascending_list(self):
# GH: 16934
# Set up a Series with a three level MultiIndex
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two'],
[4, 3, 2, 1, 4, 3, 2, 1]]
tuples = list(zip(*arrays))
index = pd.MultiIndex.from_tuples(tuples,
names=['first', 'second', 'third'])
s = pd.Series(range(8), index=index)
# Sort with boolean ascending
result = s.sort_index(level=['third', 'first'], ascending=False)
expected = s.iloc[[4, 0, 5, 1, 6, 2, 7, 3]]
tm.assert_series_equal(result, expected)
# Sort with list of boolean ascending
result = s.sort_index(level=['third', 'first'],
ascending=[False, True])
expected = s.iloc[[0, 4, 1, 5, 2, 6, 3, 7]]
tm.assert_series_equal(result, expected)
| bsd-3-clause |
Tong-Chen/scikit-learn | examples/plot_digits_classification.py | 7 | 2231 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import pylab as pl
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits,
# let's have a look at the first 4 images, stored in the `images`
# attribute of the dataset. If we were working from image files, we
# could load them using pylab.imread. For these images, we know which
# digit they represent: it is given in the 'target' of the dataset.
for index, (image, label) in enumerate(list(zip(digits.images, digits.target))[:4]):
pl.subplot(2, 4, index + 1)
pl.axis('off')
pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
pl.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
for index, (image, prediction) in enumerate(
        list(zip(digits.images[n_samples // 2:], predicted))[:4]):
pl.subplot(2, 4, index + 5)
pl.axis('off')
pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
pl.title('Prediction: %i' % prediction)
pl.show()
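# A minimal sketch (not executed here) of an equivalent evaluation using a
# random train/test split instead of the first-half/second-half split above;
# it assumes train_test_split and accuracy_score are available in this
# scikit-learn version:
#
#     from sklearn.cross_validation import train_test_split
#     X_train, X_test, y_train, y_test = train_test_split(
#         data, digits.target, test_size=0.5, random_state=0)
#     classifier.fit(X_train, y_train)
#     print("Accuracy: %f"
#           % metrics.accuracy_score(y_test, classifier.predict(X_test)))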
| bsd-3-clause |
alvarofierroclavero/scikit-learn | sklearn/linear_model/omp.py | 127 | 30417 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
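# A minimal sketch of how the Cholesky step above could be exercised on a toy
# problem; the helper below is hypothetical and only illustrative. It assumes
# the dictionary columns are scaled to unit norm, as the docstring requires.
def _cholesky_omp_toy_example(n_samples=20, n_features=10, n_nonzero=3):
    """Illustration only: run _cholesky_omp on a random unit-norm dictionary."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    X /= np.sqrt(np.sum(X ** 2, axis=0))  # normalize each atom to unit norm
    # build a target from a known sparse combination of atoms
    support = rng.permutation(n_features)[:n_nonzero]
    y = np.dot(X[:, support], rng.randn(n_nonzero))
    gamma, idx, n_active = _cholesky_omp(X, y, n_nonzero_coefs=n_nonzero)
    # idx holds the selected atoms, gamma the corresponding coefficients
    return np.sort(idx), gamma, n_active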
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
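# A small, hedged usage sketch for orthogonal_mp: recover an approximately
# k-sparse coefficient vector from a random unit-norm dictionary. The helper
# name and the synthetic data are illustrative only.
def _orthogonal_mp_toy_example(n_samples=100, n_features=30, n_nonzero=5):
    """Illustration only: solve a single OMP problem with n_nonzero atoms."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    X /= np.sqrt(np.sum(X ** 2, axis=0))
    true_coef = np.zeros(n_features)
    true_coef[rng.permutation(n_features)[:n_nonzero]] = rng.randn(n_nonzero)
    y = np.dot(X, true_coef)
    coef = orthogonal_mp(X, y, n_nonzero_coefs=n_nonzero)
    # coef has at most n_nonzero nonzero entries approximating true_coef
    return coef, true_coef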
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
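# Sketch of the Gram-based entry point: the caller precomputes Gram = X.T * X
# and Xy = X.T * y, which is what orthogonal_mp itself does when
# precompute=True. The helper name and data below are illustrative only.
def _orthogonal_mp_gram_toy_example(n_samples=100, n_features=30, n_nonzero=5):
    """Illustration only: the same kind of problem, solved from Gram and Xy."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    X /= np.sqrt(np.sum(X ** 2, axis=0))
    y = np.dot(X, rng.randn(n_features))
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    coef = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=n_nonzero)
    return coef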
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
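        # _pre_fit returns Gram=False when no Gram matrix was precomputed
        # (e.g. precompute=False, or 'auto' decided it was not worthwhile);
        # in that case solve OMP directly on X.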
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
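# Illustrative sketch (not part of the original module): a minimal fit on
# synthetic sparse data with OrthogonalMatchingPursuit.  Array sizes and
# coefficient values are made up purely for demonstration.
if __name__ == '__main__':
    import numpy as np
    _rng = np.random.RandomState(42)
    _X = _rng.randn(100, 20)
    _w_true = np.zeros(20)
    _w_true[[3, 9, 15]] = [1.5, -2.0, 0.75]
    _y = _X.dot(_w_true) + 0.01 * _rng.randn(100)
    _omp = OrthogonalMatchingPursuit(n_nonzero_coefs=3).fit(_X, _y)
    # coef_ should be 3-sparse with support matching _w_true; n_iter_ == 3.
    print(np.flatnonzero(_omp.coef_), _omp.n_iter_)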
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to False, no intercept will be used in calculations
        (i.e. data is expected to be already centered).
    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.
    max_iter : integer, optional
        Maximum number of iterations to perform, therefore maximum features
        to include. 100 by default.
Returns
-------
    residues : array, shape (n_steps, n_samples)
        Residues of the prediction on the test data, one row per step of the
        OMP path (n_steps <= max_iter)
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
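# Illustrative sketch (not part of the original module): the helper above
# returns one row of test residuals per OMP step, so squaring and averaging
# along axis 1 yields the test MSE as a function of the number of atoms.  The
# train/test split and data below are made up purely for demonstration.
if __name__ == '__main__':
    import numpy as np
    _rng = np.random.RandomState(1)
    _X = _rng.randn(80, 15)
    _w_true = np.zeros(15)
    _w_true[[0, 5, 10]] = [1.0, -1.0, 2.0]
    _y = _X.dot(_w_true) + 0.05 * _rng.randn(80)
    _res = _omp_path_residues(_X[:60], _y[:60], _X[60:], _y[60:], max_iter=6)
    # _res has shape (n_steps, n_test_samples); the per-step MSE should drop
    # sharply once the three true atoms have been selected.
    print(_res.shape, (_res ** 2).mean(axis=1))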
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
        Whether to calculate the intercept for this model. If set
        to False, no intercept will be used in calculations
        (i.e. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
        Maximum number of iterations to perform, therefore maximum features
        to include. Defaults to 10% of ``n_features``, but at least 5 if
        available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
        Number of active features across every target for the model refit
        with the best hyperparameters obtained by cross-validating across
        all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
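        # cv_paths[i] has shape (n_steps_i, n_test_samples_i): one row of test
        # residuals per OMP step on fold i.  Truncate every fold to the
        # shortest path, average the squared residuals per step, and keep the
        # step count with the lowest mean cross-validation error.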
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
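# Illustrative sketch (not part of the original module): letting the
# cross-validated variant pick the sparsity level on synthetic data.  Sizes,
# coefficients and max_iter below are made up purely for demonstration.
if __name__ == '__main__':
    import numpy as np
    _rng = np.random.RandomState(7)
    _X = _rng.randn(120, 25)
    _w_true = np.zeros(25)
    _w_true[[2, 8, 14, 20]] = [1.0, -1.5, 0.8, 2.0]
    _y = _X.dot(_w_true) + 0.05 * _rng.randn(120)
    _omp_cv = OrthogonalMatchingPursuitCV(max_iter=10).fit(_X, _y)
    # n_nonzero_coefs_ should settle near the true sparsity (4 atoms here).
    print(_omp_cv.n_nonzero_coefs_, np.flatnonzero(_omp_cv.coef_))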
| bsd-3-clause |