repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
xavierwu/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
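# A minimal sketch of the fit/score pattern used throughout this example.
# (Illustrative only; the _demo_* names are placeholders, not part of the original
# script.) score_samples() returns log-densities, so np.exp() recovers the density.
_demo_X = np.array([[0.0], [0.5], [1.0]])              # samples must be 2D: (n_samples, n_features)
_demo_grid = np.linspace(-1.0, 2.0, 5)[:, np.newaxis]  # points at which to evaluate the density
_demo_kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(_demo_X)
_demo_density = np.exp(_demo_kde.score_samples(_demo_grid))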
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
arcyfelix/Courses | 17-06-05-Machine-Learning-For-Trading/27_bollinger_bands.py | 1 | 2194 | import os
import pandas as pd
import matplotlib.pyplot as plt
''' Read: http://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-stats '''
def symbol_to_path(symbol, base_dir = 'data'):
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def dates_creator():
start_date = '2013-01-01'
end_date = '2013-12-31'
dates = pd.date_range(start_date, end_date)
return dates
def get_data(symbols, dates):
df = pd.DataFrame(index = dates)
if 'SPY' not in symbols: # adding SPY as the main reference
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol),
index_col = 'Date',
parse_dates = True,
usecols = ['Date', 'Adj Close'],
na_values = ['nan'])
df_temp = df_temp.rename(columns = {'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY':
df = df.dropna(subset = ['SPY'])
print(df)
return df
def plot(df, symbols):
ax = df.plot(title = 'Stock prices', fontsize = 12)
ax.set_xlabel('Date')
ax.set_ylabel('Price')
plt.show()
def get_rolling_mean(df, window):
return df.rolling(window = window, center = False).mean()
def get_rolling_std(df, window):
return df.rolling(window = window, center = False).std()
def bollinger_bands(df, window):
rolling_mean = get_rolling_mean(df, window)
rolling_std = get_rolling_std(df, window)
upper_band = rolling_mean + 2 * rolling_std
lower_band = rolling_mean - 2 * rolling_std
return upper_band, lower_band
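''' Usage sketch (illustrative): combined with the loaders above,
        df = get_data(['SPY'], dates_creator())
        upper, lower = bollinger_bands(df['SPY'], window = 20)
    returns two Series aligned with the price index; a close above `upper` or
    below `lower` is the usual Bollinger breakout signal. '''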
def print_pred_statistics(df, window):
# Plotting SPY
ax = df['SPY'].plot(title = 'SPY vs SPY Rolling Mean', label = 'SPY')
# Updated API for rolling mean!
rm_SPY = get_rolling_mean(df['SPY'], window)
# Plotting Rolling Mean of SPY
rm_SPY.plot(label = 'Rolling Mean', ax = ax )
# Calculating Bollinger Bands (R)
upper_bollinger, lower_bollinger = bollinger_bands(df['SPY'], window = window)
upper_bollinger.plot(label = 'Upper band', ax = ax)
lower_bollinger.plot(label = 'Lower band', ax = ax)
# Adding the legend
ax.legend(loc = 'upper left')
# Show!
plt.show()
symbols = ['SPY']
if __name__ == "__main__":
dates = dates_creator()
df = get_data(symbols, dates)
print_pred_statistics(df, window = 20)
| apache-2.0 |
jmschrei/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
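# Small illustrative check (variable names here are placeholders, not part of the
# comparison below): PCA.score(X) is the average per-sample log-likelihood under
# the fitted probabilistic PCA model, the quantity cross_val_score aggregates in
# compute_scores() further down.
pca_at_rank = PCA(n_components=rank).fit(X_homo[:500])
holdout_ll = pca_at_rank.score(X_homo[500:])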
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
ithemal/Ithemal | timing_tools/timing/gettiming.py | 1 | 10504 | from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import sys
import common_libs.utilities as ut
from tqdm import tqdm
import subprocess
import os
import re
import time
import argparse
def wait_timeout(proc, seconds):
"""Wait for a process to finish, or raise exception after timeout"""
start = time.time()
end = start + seconds
interval = min(seconds / 1000.0, .25)
while True:
result = proc.poll()
if result is not None:
return result
if time.time() >= end:
proc.kill()
return None
time.sleep(interval)
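# Usage sketch for wait_timeout (illustrative; 'sleep 1' is a placeholder command,
# not one issued by the harness below). Returns the exit code, or None if the
# process had to be killed after the timeout.
def _wait_timeout_example():
    proc = subprocess.Popen(['sleep', '1'])
    return wait_timeout(proc, 5)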
def fix_reg_names(line):
# nasm recognizes, for instance, r14d rather than r14l
regs = [('r%dl'%x, 'r%dd'%x) for x in range(8, 16)]
for old, new in regs:
line = line.replace(old, new)
return line
def remove_unrecog_words(line):
words = ['ptr', '<rel>']
for word in words:
line = line.replace(word,'')
return line
def add_memory_prefix(line):
mem = re.search('.*\[(.*)\].*', line)
if (mem != None and
re.match('.*(rsp|rbp|esp|ebp)', mem.group(1)) is None and
not line.strip().startswith('lea')):
index = mem.span(1)[0]
line = line[:index] + 'UserData + ' + line[index:]
return line
def insert_time_value(cnx,code_id, time, arch, ttable):
sql = 'INSERT INTO ' + ttable + ' (code_id, arch, kind, time) VALUES(' + str(code_id) + ',' + str(arch) + ',\'actual\',' + str(time) + ')'
ut.execute_query(cnx, sql, False)
cnx.commit()
def insert_col_values(cnx, cols, values, code_id, arch, ttable):
for i in range(len(values[0])):
colstr = ''
valuestr = ''
for j, col in enumerate(cols):
if j != len(cols) - 1:
colstr += col + ', '
valuestr += str(values[j][i]) + ', '
else:
colstr += col
valuestr += str(values[j][i])
sql = 'INSERT INTO ' + ttable + ' (code_id, arch, kind,' + colstr + ') VALUES(' + str(code_id) + ',' + str(arch) + ',\'actual\',' + valuestr + ')'
print sql
ut.execute_query(cnx, sql, False)
cnx.commit()
class PMCValue:
def __init__(self, value):
self.value = value
self.count = 1
class PMC:
def __init__(self, name):
self.name = name
self.values = []
self.mod_values = []
self.mode = None
self.percentage = 5
def add_value(self, nvalue):
self.values.append(nvalue)
added = False
for val in self.mod_values:
if val.value == 0:
val.value = 1e-3
if (abs(val.value - nvalue) * 100.0 / val.value) < self.percentage:
val.value = (val.value * val.count + nvalue) / (val.count + 1)
val.count += 1
added = True
break
if not added:
val = PMCValue(nvalue)
self.mod_values.append(val)
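    # add_value() buckets readings that fall within `percentage` percent of an
    # existing bucket's running mean; set_mode() below then reports the mean of the
    # most populated bucket as the representative measurement.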
def set_mode(self):
max_count = 0
for val in self.mod_values:
if val.count > max_count:
self.mode = val.value
max_count = val.count
class PMCCounters:
def __init__(self,line):
names = line.split()
#print names
self.counters = list()
for name in names:
self.counters.append(PMC(name))
def add_to_counters(self, line):
values = line.split()
#print values
if len(values) != len(self.counters):
return
for i, value in enumerate(values):
self.counters[i].add_value(int(value))
def set_modes(self):
for counter in self.counters:
counter.set_mode()
def get_value(self, name):
for counter in self.counters:
if name == counter.name:
return counter.values
return None
def get_mode(self, name):
for counter in self.counters:
if name == counter.name:
return counter.mode
return None
def check_error(line):
errors = ['error','fault']
warnings = ['warning']
for error in errors:
for warning in warnings:
if error in line and not warning in line:
return True
return False
if __name__ == '__main__':
#command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--arch',action='store',type=int,required=True)
parser.add_argument('--database',action='store',type=str,required=True)
parser.add_argument('--user',action='store', type=str, required=True)
parser.add_argument('--password',action='store', type=str, required=True)
parser.add_argument('--port',action='store', type=int, required=True)
parser.add_argument('--ctable',action='store',type=str, required=True)
parser.add_argument('--ttable',action='store',type=str, required=True)
parser.add_argument('--limit',action='store',type=int, default=None)
parser.add_argument('--tp',action='store',type=bool,default=False)
args = parser.parse_args(sys.argv[1:])
cnx = ut.create_connection(database=args.database, user=args.user, password=args.password, port=args.port)
sql = 'SELECT code_intel, code_id from ' + args.ctable
rows = ut.execute_query(cnx, sql, True)
print len(rows)
harness_dir = os.environ['ITHEMAL_HOME'] + '/timing_tools/harness'
os.chdir(harness_dir)
total = 0
errors = 0
except_errors = 0
success = 0
not_finished = 0
total_time = 0.0
total_bbs = 0
# do a dry run to figure out measurement overhead
with open('bb.nasm', 'w') as f:
f.close()
proc = subprocess.Popen('./a64-out.sh', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = wait_timeout(proc, 10)
startHeading = False
startTimes = False
counters = None
for i, line in enumerate(iter(proc.stdout.readline, '')):
if 'Clock' in line and startTimes == False and startHeading == False: #still didn't start collecting the actual timing data
startHeading = True
if startHeading == True:
counters = PMCCounters(line)
startTimes = True
startHeading = False
elif startTimes == True:
counters.add_to_counters(line)
assert counters is not None
counters.set_modes()
overhead = counters.get_mode('Core_cyc')
print 'OVERHEAD =', overhead
for row in rows:
if row[0] == None:
continue
splitted = row[0].split('\n')
written = 0
final_bb = []
for i, line in enumerate(splitted):
if line != '':
line = remove_unrecog_words(line + '\n')
line = fix_reg_names(line)
final_bb.append(line)
written += 1
if written > 0:
total += 1
with open('bb.nasm','w+') as f:
f.writelines(final_bb)
proc = subprocess.Popen('./a64-out.sh', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
start_time = time.time()
result = wait_timeout(proc, 10)
end_time = time.time()
if result != None:
print final_bb
try:
error_lines = False
for line in iter(proc.stderr.readline, ''):
if check_error(line):
print 'error ' + line
error_lines = True
break
if error_lines == False:
startHeading = False
startTimes = False
counters = None
for i, line in enumerate(iter(proc.stdout.readline, '')):
print line
if 'Clock' in line and startTimes == False and startHeading == False: #still didn't start collecting the actual timing data
startHeading = True
if startHeading == True:
#print 'headings ' + line
counters = PMCCounters(line)
startTimes = True
startHeading = False
elif startTimes == True:
#print 'values ' + line
counters.add_to_counters(line)
if counters != None:
names = ['Core_cyc', 'L1_read_misses', 'L1_write_misses', 'iCache_misses', 'Context_switches']
columns = ['time', 'l1drmisses', 'l1dwmisses', 'l1imisses', 'conswitch']
values = []
aval_cols = []
for i, name in enumerate(names):
vs = counters.get_value(name)
if vs != None:
values.append(vs)
aval_cols.append(columns[i])
if name == 'Core_cyc':
for j, v in enumerate(values[-1]):
values[-1][j] -= overhead
print aval_cols, values
if not args.tp:
insert_col_values(cnx, aval_cols, values, row[1], args.arch, args.ttable)
total_time += end_time - start_time
total_bbs += 1
print float(total_bbs)/total_time
success += 1
else:
for line in final_bb:
print line[:-1]
errors += 1
except Exception as e:
print e
print 'exception occurred'
except_errors += 1
else:
print 'error not completed'
not_finished += 1
if args.limit != None:
if success == args.limit:
break
print total, success, errors, not_finished, except_errors
print overhead
cnx.close()
| mit |
Groovy-Dragon/tcRIP | UL_HMM.py | 1 | 24366 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 4 10:44:38 2017
@author: lewismoffat
"""
"""
Python module for creating, training and applying hidden
Markov models to discrete or continuous observations.
Author: Michael Hamilton, [email protected]
Theoretical concepts obtained from Rabiner, 1989.
"""
import numpy, pylab, time, copy
from numpy import random as rand
from numpy import linalg
from matplotlib import rc
rc('text', usetex=True)
class HMM_Classifier:
"""
A binary hmm classifier that utilizes two hmms: one corresponding
to the positive activity and one corresponding to the negative
activity.
"""
def __init__( self, **args ):
"""
:Keywords:
- `neg_hmm` - hmm corresponding to negative activity
- `pos_hmm` - hmm corresponding to positive activity
"""
self.neg_hmm = None
self.pos_hmm = None
if 'neg_hmm' in args:
self.neg_hmm = args[ 'neg_hmm' ]
if 'pos_hmm' in args:
self.pos_hmm = args[ 'pos_hmm' ]
def classify( self, sample ):
"""
Classification is performed by calculating the
log odds for the positive activity. Since the hmms
return a log-likelihood (due to scaling)
of the corresponding activity, the difference of
the two log-likelihoods is the log odds.
"""
# Scream if an hmm is missing
if self.pos_hmm == None or self.neg_hmm == None:
raise "pos/neg hmm(s) missing"
pos_ll = forward( self.pos_hmm, sample, scaling=1 )[ 0 ]
neg_ll = forward( self.neg_hmm, sample, scaling=1 )[ 0 ]
# log odds by difference of log-likelihoods
return pos_ll - neg_ll
def add_pos_hmm( self, pos_hmm ):
"""
Add the hmm corresponding to positive
activity. Replaces current positive hmm, if it exists.
"""
self.pos_hmm = pos_hmm
def add_neg_hmm( self, neg_hmm ):
"""
Add the hmm corresponding to negative
activity. Replaces current negative hmm, if it exists.
"""
self.neg_hmm = neg_hmm
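# Decision rule implied by HMM_Classifier.classify() (illustrative helper, not part
# of the original module): classify() returns log P(sample|pos) - log P(sample|neg),
# so a sample is labelled positive when the log odds exceed a threshold of zero.
def _classify_example(classifier, sample, threshold=0.0):
    return classifier.classify(sample) > threshold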
class HMM:
"""
Creates and maintains a hidden Markov model. This version assumes that every state can be
reached DIRECTLY from any other state (ergodic). This, of course, excludes the start state.
Hence the state transition matrix, A, must be N X N. The observable symbol probability
distributions are represented by an N X M matrix where M is the number of observation
symbols.
|a_11 a_12 ... a_1N| |b_11 b_12 ... b_1M|
|a_21 a_22 ... a_2N| |b_21 b_22 ... b_2M|
A = | . . . | B = | . . . |
| . . . | | . . . |
|a_N1 a_N2 ... a_NN| |b_N1 b_N2 ... b_NM|
a_ij = P(q_t = S_j|q_t-1 = S_i) b_ik = P(v_k at t|q_t = S_i)
where q_t is state at time t and v_k is k_th symbol of observation sequence
"""
def __init__( self, n_states=1, **args ):
"""
:Keywords:
- `n_states` - number of hidden states
- `V` - list of all observable symbols
- `A` - transition matrix
- `B` - observable symbol probability distribution
- `D` - dimensionality of continuous observations
- `F` - Fixed emission probabilities for the given state ( dict: i -> numpy.array( [n_states] ),
where i is the state to hold fixed.
"""
self.N = n_states # Number of hidden states
# Initialize observable symbol set parameters
self.V = args[ 'V' ]
self.M = len( self.V )
self.symbol_map = dict( zip ( self.V, range( len( self.V ) )) )
# Initialize transition probability matrix
if 'A' in args:
self.A = args[ 'A' ]
assert numpy.shape( self.A ) == ( self.N, self.N )
else:
# Randomly initialize matrix and normalize so sum over a row = 1
raw_A = rand.uniform( size = self.N * self.N ).reshape( ( self.N, self.N ) )
self.A = ( raw_A.T / raw_A.T.sum( 0 ) ).T
if n_states == 1:
self.A.reshape( (1,1) )
# Initialize observable symbol probability distributions
if 'B' in args:
self.B = args[ 'B' ]
if n_states > 1:
assert numpy.shape( self.B ) == ( self.N, self.M )
else:
self.B = numpy.reshape(self.B, (1,self.M) )
if 'F' in args:
self.F = args[ 'F' ]
for i in self.F.keys():
self.B[ i,: ] = self.F[ i ]
else:
self.F = {}
else:
# initialize distribution
B_raw = rand.uniform( 0, 1, self.N * self.M ).reshape( ( self.N, self.M ) )
self.B = ( B_raw.T / B_raw.T.sum( 0 ) ).T
if 'F' in args:
self.F = args[ 'F' ]
for i in self.F.keys():
self.B[ i,: ] = self.F[ i ]
else:
self.F = {}
# Initialize the intitial state distribution
if 'Pi' in args:
self.Pi = args[ 'Pi' ]
assert len( self.Pi ) == self.N
else:
# initialize to uniform distribution
self.Pi = numpy.array ( 1.0 / self.N ).repeat( self.N )
if 'Labels' in args:
self.Labels = args[ 'Labels' ]
else:
self.Labels = range( self.N )
if 'F' in args:
self.F = args[ 'F' ]
for i in self.F.keys():
self.B[ i,: ] = self.F[ i ]
else:
self.F = {}
def __repr__( self ):
print(self.A)
retn = ""
retn += "num hiddens: %d\n" % ( self.N ) + \
"symbols: %s\n" % ( self.V ) + \
"\nA:\n %s\n" % ( str( self.A ) ) + \
"Pi:\n %s" % ( str( self.Pi ) )
return retn
def symbol_index( hmm, Obs ):
"""
Converts an obeservation symbol sequence into a sequence
of indices for accessing distribution matrices.
"""
Obs_ind = []
for o in Obs: Obs_ind.append( hmm.symbol_map[ o ] )
return Obs_ind
def forward( hmm, Obs, scaling=True ):
"""
Calculate the probability of an observation sequence, Obs,
given the model, P(Obs|hmm).
Obs: observation sequence
hmm: model
returns: P(Obs|hmm)
"""
T = len( Obs ) # Number of states in observation sequence
# Get index sequence of observation sequence to access
# the observable symbol probabilty distribution matrix
Obs = symbol_index( hmm, Obs )
# create scaling vector
if scaling:
c = numpy.zeros( [ T ], float )
# Base Case:
Alpha = numpy.zeros( [ hmm.N, T ], float )
Alpha[ :,0 ] = hmm.Pi * hmm.B[ :,Obs[ 0 ] ]
if scaling:
c[ 0 ] = 1.0 / numpy.sum( Alpha[ :,0 ] )
Alpha[ :,0 ] = c[ 0 ] * Alpha[ :,0 ]
# Induction Step:
for t in xrange( 1,T ):
Alpha[ :,t ] = numpy.dot( Alpha[ :,t-1 ], hmm.A) * hmm.B[ :,Obs[ t ] ]
if scaling:
c[ t ] = 1.0 / numpy.sum( Alpha[ :,t ] )
Alpha[ :,t] = Alpha[ :,t] * c[ t ]
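    # Each c[t] rescales Alpha[:,t] to sum to 1, so prod(c) = 1 / P(Obs|hmm); hence
    # log P(Obs|hmm) = -sum(log c), which is returned below in place of the raw
    # (underflow-prone) probability.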
if scaling:
log_Prob_Obs = -( numpy.sum( numpy.log( c ) ) )
return ( log_Prob_Obs, Alpha, c )
else:
prob_Obs = numpy.sum( Alpha[ :,T-1 ] )
return ( prob_Obs, Alpha )
def backward( hmm, Obs, c=None ):
"""
Calculate the probability of a partial observation sequence
from t+1 to T, given some state t.
Obs: observation sequence
hmm: model
c: the scaling coefficients from forward algorithm
returns: B_t(i)
"""
T = len( Obs ) # Number of states in observation sequence
# Get index sequence of observation sequence to access
# the observable symbol probabilty distribution matrix
Obs = symbol_index( hmm, Obs )
# Base Case:
Beta = numpy.zeros( [ hmm.N, T ], float )
Beta[ :, T-1 ] = 1.0
if c is not None:
Beta [ :,T-1 ] = Beta [ :,T-1 ] * c[ T-1 ]
# Inductive Step:
for t in reversed( xrange( T-1 ) ):
Beta[ :,t ] = numpy.dot( hmm.A, ( hmm.B[ :,Obs[ t+1 ] ] * Beta[ :,t+1 ] ) )
if c is not None:
Beta[ :,t ] = Beta[ :,t ] * c[ t ]
return Beta
def viterbi( hmm, Obs, scaling=True ):
"""
Calculate P(Q|Obs, hmm) and yield the state sequence Q* that
maximizes this probability.
Obs: observation sequence
hmm: model
"""
T = len( Obs ) # Number of states in observation sequence
# Get index sequence of observation sequence to access
# the observable symbol probabilty distribution matrix
Obs = symbol_index( hmm, Obs )
# Initialization
# Delta[ i,j ] = max_q1,q2,...,qt P( q1, q2,...,qt = i, O_1, O_2,...,O_t|hmm )
# this is the highest prob along a single path at time t ending in state S_i
Delta = numpy.zeros( [ hmm.N,T ], float)
if scaling:
Delta[ :,0 ] = numpy.log( hmm.Pi ) + numpy.log( hmm.B[ :,Obs[ 0] ] )
else:
Delta[ :,0 ] = hmm.Pi * hmm.B[ :,Obs[ 0] ]
# Track Maximal States
Psi = numpy.zeros( [ hmm.N, T ], int )
# Inductive Step:
if scaling:
for t in xrange( 1,T ):
nus = Delta[ :,t-1 ] + numpy.log( hmm.A )
Delta[ :,t ] = nus.max(1) + numpy.log( hmm.B[ :,Obs[ t ] ] )
Psi[ :,t ] = nus.argmax( 1 )
else:
for t in xrange( 1,T ):
nus = Delta[ :,t-1 ] * hmm.A
Delta[ :,t ] = nus.max( 1 ) * hmm.B[ :,Obs[ t ] ]
Psi[ :,t ] = nus.argmax(1)
# Calculate State Sequence, Q*:
Q_star = [ numpy.argmax( Delta[ :,T-1 ] ) ]
for t in reversed( xrange( T-1 ) ) :
Q_star.insert( 0, Psi[ Q_star[ 0 ],t+1 ] )
return ( Q_star, Delta, Psi )
def baum_welch( hmm, Obs_seqs, **args ):
"""
EM algorithm to update Pi, A, and B for the HMM
:Parameters:
- `hmm` - hmm model to train
- `Obs_seqs` - list of observation sequences to train over
:Return:
a trained hmm
:Keywords:
- `epochs` - number of iterations to perform EM, default is 20
- `val_set` - validation data set, not required but recommended to prevent over-fitting
- `updatePi` - flag to update initial state probabilities
- `updateA` - flag to update transition probabilities, default is True
- `updateB` - flag to update observation emission probabilites for discrete types, default is True
- `scaling` - flag to scale probabilities (log scale), default is True
- `graph` - flag to plot log-likelihoods of the training epochs, default is False
- `normUpdate` - flag to use 1 / -(normed log-likelihood) contribution for each observation
sequence when updating model parameters, default if False
- `fname` - file name to save plot figure, default is ll.eps
- `verbose` - flag to print training times and log likelihoods for each training epoch, default is false
"""
# Setup keywords
if 'epochs' in args: epochs = args[ 'epochs' ]
else: epochs = 20
updatePi=updateA=updateB=scaling=graph = 1
normUpdate=verbose=validating = 0
if 'updatePi' in args: updatePi = args[ 'updatePi' ]
if 'updateA' in args: updateA = args[ 'updateA' ]
if 'updateB' in args: updateB = args[ 'updateB' ]
if 'scaling' in args: scaling = args[ 'scaling' ]
if 'graph' in args: graph = args[ 'graph' ]
if 'normUpdate' in args: normUpdate = args[ 'normUpdate' ]
if 'fname' in args: fname = args[ 'fname' ]
else: fname = 'll.eps'
if 'verbose' in args: verbose = args[ 'verbose' ]
if 'val_set' in args:
validating = 1
val_set = args[ 'val_set' ]
K = len( Obs_seqs ) # number of observation sequences
start = time.time() # start training timer
LLs = [] # keep track of log likelihoods for each epoch
val_LLs = [] # keep track of validation log-likelihoods for each epoch
# store best parameters
best_A = copy.deepcopy( hmm.A )
best_B = copy.deepcopy( hmm.B )
best_Pi = copy.deepcopy( hmm.Pi )
best_epoch = 'N/A'
best_val_LL = None
# Iterate over specified number of EM epochs
for epoch in xrange( epochs ):
start_epoch = time.time() # start epoch timer
LL_epoch = 0 # intialize log-likelihood of all seqs given the model
Expect_si_all = numpy.zeros( [ hmm.N ], float ) # Expectation of being in state i over all seqs
Expect_si_all_TM1 = numpy.zeros( [ hmm.N ], float ) # Expectation of being in state i over all seqs until T-1
Expect_si_sj_all = numpy.zeros( [ hmm.N, hmm.N ], float ) # Expectation of transitioning from state i to state j over all seqs
Expect_si_sj_all_TM1 = numpy.zeros( [ hmm.N, hmm.N ], float )
Expect_si_t0_all = numpy.zeros( [ hmm.N ] ) # Expectation of initially being in state i over all seqs
Expect_si_vk_all = numpy.zeros( [ hmm.N, hmm.M ], float ) # Expectation of being in state i and seeing symbol vk
ow = 0
for Obs in Obs_seqs:
if ow > 0 and ow % 100 == 0:
print("epoch %d: %d seqs processed" % ( epoch+1, ow ))
ow += 1
Obs = list( Obs )
log_Prob_Obs, Alpha, c = forward( hmm=hmm, Obs=Obs, scaling=1 ) # Calculate forward probs, log-likelihood, and scaling vals
Beta = backward( hmm=hmm, Obs=Obs, c=c ) # Calculate backward probs
LL_epoch += log_Prob_Obs # Update overall epoch log-likelihood
T = len( Obs ) # Number of states in observation sequence
# Determine update weight of the observation for contribution
# to model parameter maximization
if normUpdate:
w_k = 1.0 / -( log_Prob_Obs + numpy.log( len( Obs ) ) )
else:
w_k = 1.0
# Get index sequence of observation sequence to access
# the observable symbol probabilty distribution matrix
Obs_symbols = Obs[ : ]
Obs = symbol_index( hmm, Obs )
# Calculate gammas
# Gamma[ i,t ] = P( q_t = S_i|Obs, hmm)
Gamma_raw = Alpha * Beta
Gamma = Gamma_raw / Gamma_raw.sum( 0 )
Expect_si_t0_all += w_k * Gamma[ :,0 ]
# Expect_si_all[ i ] = expected number of transitions from state i over all
# training sequences.
Expect_si_all += w_k * Gamma.sum( 1 )
Expect_si_all_TM1 += w_k * Gamma[ :,:T-1 ].sum( 1 )
# Calculate Xis
# Xi is an N X N X T-1 matrix corresponding to
# Xi[ i,j,t ] = P(q_t = S_i, q_t+1 = S_j|Obs, hmm )
Xi = numpy.zeros( [ hmm.N, hmm.N, T-1 ], float )
for t in xrange( T-1 ):
for i in xrange( hmm.N ):
Xi[ i,:,t ] = Alpha[ i,t ] * hmm.A[ i,: ] * hmm.B[ :, Obs[ t+1 ] ] * Beta[ :,t+1 ]
if not scaling:
Xi[ :,:,t ] = Xi[ :,:,t ] / Xi[ :,:,t ].sum()
# Expect_si_sj_all = expected number of transitions from state s_i to state s_j
Expect_si_sj_all += w_k * Xi.sum( 2 ) #which = numpy.array( hmm.V[ k ] == numpy.array( Obs_symbols ) )
Expect_si_sj_all_TM1 += w_k * Xi[ :,:,:T-1].sum( 2 )
if updateB:
B_bar = numpy.zeros( [ hmm.N, hmm.M ], float )
for k in xrange( hmm.M ):
which = numpy.array( [ hmm.V[ k ] == x for x in Obs_symbols ] )
B_bar[ :,k ] = Gamma.T[ which,: ].sum( 0 )
Expect_si_vk_all += w_k * B_bar
############## Reestimate model parameters ###############
# reestimate initial state probabilites
if updatePi:
Expect_si_t0_all = Expect_si_t0_all / numpy.sum( Expect_si_t0_all )
hmm.Pi = Expect_si_t0_all
# reestimate transition probabilites
if updateA:
A_bar = numpy.zeros( [ hmm.N, hmm.N ], float )
for i in xrange( hmm.N ):
A_bar[ i,: ] = Expect_si_sj_all_TM1[ i,: ] / Expect_si_all_TM1[ i ]
hmm.A = A_bar
if updateB:
# reestimate emission probabilites
# ( observable symbol probability distribution )
for i in xrange( hmm.N ):
Expect_si_vk_all[ i,: ] = Expect_si_vk_all [ i,: ] / Expect_si_all[ i ]
hmm.B = Expect_si_vk_all
for i in hmm.F.keys():
hmm.B[ i,: ] = hmm.F[ i ]
LLs.append( LL_epoch )
# Quit if log_likelihoods have plateaued
if epoch > 1:
if LLs[ epoch - 1 ] == LL_epoch:
print("Log-likelihoods have plateaued--terminating training")
break
# if validating, then calculate log-likelihood of validation set
# to determine if training should be terminated.
if validating:
val_LL_epoch = 0
for val_Obs in val_set:
val_Obs = list( val_Obs )
val_LL_epoch += forward( hmm=hmm, Obs=val_Obs, scaling=1 )[ 0 ]
val_LLs.append( val_LL_epoch )
# Terminate training if validation log-likelihood is worse (lower) than
# previous epoch
if epoch > 0:
if val_LL_epoch > best_val_LL:
best_A = copy.deepcopy( hmm.A )
best_B = copy.deepcopy( hmm.B )
best_Pi = copy.deepcopy( hmm.Pi )
best_epoch = epoch
best_val_LL = val_LL_epoch
else:
best_val_LL = val_LL_epoch
best_epoch = 0
if verbose:
print("Finished epoch %d in %d secs" % ( epoch+1, int( time.time() - start_epoch ) ), LL_epoch)
if validating:
print("Validation LL: ", val_LLs[ epoch ])
if graph:
if validating:
pylab.figure()
pylab.subplot( 211 )
pylab.title( "Training Reestimation Performance" )
pylab.xlabel( "Epochs" )
pylab.ylabel( r"$\log( P ( O | \lambda ) )$" )
pylab.plot( LLs, label="Training data", color='red' )
pylab.subplots_adjust( hspace=0.4 )
pylab.subplot( 212 )
pylab.title( "Validation Reestimation Performance" )
pylab.plot( val_LLs, label="Validation LL", color='blue' )
pylab.xlabel( "Epochs" )
pylab.ylabel( r"$\log( P ( O | \lambda ) )$" )
pylab.axvline( best_epoch, color="black", label="Lowest validation LL", linewidth=2 )
pylab.legend( labelsep=0.01, shadow=1 , loc='lower right' )
pylab.savefig( fname )
else:
pylab.figure()
pylab.title( "Training Reestimation Performance" )
pylab.xlabel( "Epochs" )
pylab.ylabel( r"$\log( P ( O | \lambda ) )$" )
pylab.plot( LLs, label="Training data", color='red' )
pylab.savefig( fname )
print("Total training time: %d secs" % ( int( time.time() - start ) ), "Best epoch: ", best_epoch)
if validating:
hmm.A = best_A
hmm.B = best_B
hmm.Pi = best_Pi
return hmm
###################################################################################
################################ Example ########################################
###################################################################################
def dishonest_casino_test( graph = True ):
# create transition probability matrix
A = numpy.array( [ [ 0.95, 0.05],[ 0.05, 0.95 ] ] )
# create observable probability distribution matrix
B = numpy.array( [ [ 1.0/6, 1.0/6, 1.0/6, 1.0/6, 1.0/6, 1.0/6, ], \
[ 1.0/10, 1.0/10, 1.0/10, 1.0/10, 1.0/10, 1.0/2 ] ] )
# B = [ (-1,.1), (1,.1) ]
A = numpy.array( [ [ 0.99, 0.01 ], \
[ 0.01, 0.99 ] ] )
# create set of all observable symbols
V = [1, 2, 3, 4, 5, 6]
# instantiate an hmm, note Pi is uniform probability distribution
# by default
hmm = HMM( 2, A=A, B=B, V=V )
# adjust the precision of printing float values
numpy.set_printoptions( precision=4 )
print("\nDishonest Casino Example:\n ")
Obs = [ 1,2,1,6,6 ]
print(hmm)
print()
print('*'*80)
print('*'*80)
print("\nWithout Scaling\n")
print("\nObservation Sequence: %s\n" % ( Obs ))
prob_Obs, Alpha = forward( hmm, Obs, scaling=0 )
print('*'*29)
print("* Forward Algorithm Results *")
print('*'*29 + '\n')
print("p(Obs|hmm) ~ %.7f" % ( prob_Obs ))
print("Alpha's:\n %s\n" % ( Alpha ))
print( '*'*80 + '\n')
Beta = backward( hmm, Obs )
print('*'*30)
print("* Backward Algorithm Results *")
print('*'*30 + '\n')
print("Beta's:\n %s\n" % ( str( Beta ) ))
print('*'*80 + '\n')
Q_star, Delta, Psi = viterbi( hmm, Obs, scaling=0 )
print('*'*29)
print( "* Viterbi Algorithm Results *")#Xi[ i,:,t ] = Xi[ i,:,t ] / Xi[ i,:,: ].sum( 1 )
print( '*'*29 + '\n')
print( "Q* = %s\n" % ( Q_star ))
print( "Delta's:\n %s\n" % ( Delta ))
print( "Psi:\n %s\n" % ( Psi ))
print( '*'*80 + '\n')
print( '*'*80)
print( '*'*80)
print( "\nWith Scaling\n")
print( "\nObservation Sequence: %s\n" % ( Obs ))
log_prob_Obs, Alpha, c = forward( hmm, Obs, scaling=1 )
print( '*'*29)
print( "* Forward Algorithm Results *")
print( '*'*29 + '\n')
print( "p(Obs|hmm) ~ %.7f" % ( numpy.exp( log_prob_Obs ) ))
print( "Alpha's:\n %s\n" % ( Alpha ))
print( '*'*80 + '\n')
Beta = backward( hmm, Obs, c )
print( '*'*30)
print( "* Backward Algorithm Results *")
print( '*'*30 + '\n')
print( "Beta's:\n %s\n" % ( str( Beta ) ))
print( '*'*80 + '\n')
Q_star, Delta, Psi = viterbi( hmm, Obs, scaling=1 )
print( '*'*29)
print( "* Viterbi Algorithm Results *")
print( '*'*29 + '\n')
print( "Q* = %s\n" % ( Q_star ))
print( "Delta's:\n %s\n" % ( Delta ))
print( "Psi:\n %s\n" % ( Psi ))
print( '*'*80 + '\n')
c = []
c.append( Obs )
baum_welch( hmm, c, epochs=15, graph=graph )
###################################################################################
###################################################################################
###################################################################################
#if __name__ == "__main__":
# ## # run the example, you can turn off graphing by setting it to 0
### X = rand.uniform(0,1,10).reshape( (5,2) )
### print norm_df(X)
# dishonest_casino_test( graph = 1 )
#
#def runme():
#
# #based on Mike's DC example
# #transition probabilities
# #A = numpy.array( [ [ 0.95, 0.05],[ 0.1, 0.90 ] ] )
# A = numpy.array( [ [.5,.5],[.5,.5]])
# #emission probabilities
# B = numpy.array( [ [ 1.0/6, 1.0/6, 1.0/6, 1.0/6, 1.0/6, 1.0/6, ], \
# [ 1.0/10, 1.0/10, 1.0/10, 1.0/10, 1.0/10, 1.0/2 ] ] )
# #symbols
# V = [1,2,3,4,5,6]
#
# model = HMM(2,A=A,B=B,V=V)
# numpy.set_printoptions(precision=5) | mit |
andrasfuchs/BioBalanceDetector | Measurements/WaveForms/Experiments/SleepLogging/python/AnalogOut_Play.py | 1 | 3587 | """
DWF Python Example
Author: Digilent, Inc.
Revision: 2018-07-19
Requires:
Python 2.7, 3
"""
import numpy as np
import scipy.io.wavfile
import matplotlib.pyplot as plt
import ctypes
from ctypes import *
import sys
print("Load audio.WAV file")
rate, data = scipy.io.wavfile.read('audio.wav')
print("Rate: "+str(rate))
print("Size: "+str(data.size))
print("Type: " +str(np.dtype(data[0])))
# AnalogOut expects double normalized to +/-1 value
dataf = data.astype(np.float64)
if np.dtype(data[0]) == np.int8 or np.dtype(data[0]) == np.uint8 :
print("Scaling: UINT8")
dataf /= 128.0
dataf -= 1.0
elif np.dtype(data[0]) == np.int16 :
print("Scaling: INT16")
dataf /= 32768.0
elif np.dtype(data[0]) == np.int32 :
print("Scaling: INT32")
dataf /= 2147483648.0
data_c = (ctypes.c_double * len(dataf))(*dataf)
plt.plot(data)
plt.show()
if sys.platform.startswith("win"):
dwf = cdll.dwf
elif sys.platform.startswith("darwin"):
dwf = cdll.LoadLibrary("/Library/Frameworks/dwf.framework/dwf")
else:
dwf = cdll.LoadLibrary("libdwf.so")
# declare ctype variables
hdwf = c_int()
channel = c_int(0) # AWG 1
# print(DWF version
version = create_string_buffer(16)
dwf.FDwfGetVersion(version)
print("DWF Version: "+str(version.value))
# open device
print("Opening first device...")
dwf.FDwfDeviceOpen(c_int(-1), byref(hdwf))
if hdwf.value == 0:
print("Failed to open device")
szerr = create_string_buffer(512)
dwf.FDwfGetLastErrorMsg(szerr)
print(str(szerr.value))
quit()
print("Playing audio...")
iPlay = 0
dwf.FDwfAnalogOutNodeEnableSet(hdwf, channel, 0, c_bool(True))
dwf.FDwfAnalogOutNodeFunctionSet(hdwf, channel, 0, c_int(31)) #funcPlay
dwf.FDwfAnalogOutRepeatSet(hdwf, channel, c_int(1))
sRun = 1.0*data.size/rate
print("Length: "+str(sRun))
dwf.FDwfAnalogOutRunSet(hdwf, channel, c_double(sRun))
dwf.FDwfAnalogOutNodeFrequencySet(hdwf, channel, 0, c_double(rate))
dwf.FDwfAnalogOutNodeAmplitudeSet(hdwf, channel, 0, c_double(1.0))
# prime the buffer with the first chunk of data
cBuffer = c_int(0)
dwf.FDwfAnalogOutNodeDataInfo(hdwf, channel, 0, 0, byref(cBuffer))
if cBuffer.value > data.size : cBuffer.value = data.size
dwf.FDwfAnalogOutNodeDataSet(hdwf, channel, 0, data_c, cBuffer)
iPlay += cBuffer.value
dwf.FDwfAnalogOutConfigure(hdwf, channel, c_bool(True))
dataLost = c_int(0)
dataFree = c_int(0)
dataCorrupted = c_int(0)
sts = c_ubyte(0)
totalLost = 0
totalCorrupted = 0
while True :
# fetch analog in info for the channel
if dwf.FDwfAnalogOutStatus(hdwf, channel, byref(sts)) != 1:
print("Error")
szerr = create_string_buffer(512)
dwf.FDwfGetLastErrorMsg(szerr)
print(szerr.value)
break
if sts.value != 3: break # not running !DwfStateRunning
if iPlay >= data.size : continue # no more data to stream
dwf.FDwfAnalogOutNodePlayStatus(hdwf, channel, 0, byref(dataFree), byref(dataLost), byref(dataCorrupted))
totalLost += dataLost.value
totalCorrupted += dataCorrupted.value
if iPlay + dataFree.value > data.size : # last chunk might be less than the free buffer size
dataFree.value = data.size - iPlay
if dataFree.value == 0 : continue
if dwf.FDwfAnalogOutNodePlayData(hdwf, channel, 0, byref(data_c, iPlay*8), dataFree) != 1: # offset for double is *8 (bytes)
print("Error")
break
iPlay += dataFree.value
print("Lost: "+str(totalLost))
print("Corrupted: "+str(totalCorrupted))
print("done")
dwf.FDwfAnalogOutReset(hdwf, channel)
dwf.FDwfDeviceClose(hdwf) | gpl-3.0 |
Aryan-Barbarian/bigbang | bigbang/repo_loader.py | 3 | 7776 | from git_repo import GitRepo, MultiGitRepo
import json;
import os;
import re;
import subprocess;
import sys;
import pandas as pd
import requests
import fnmatch
from IPython.nbformat import current as nbformat
from IPython.nbconvert import PythonExporter
import networkx as nx
import compiler
from compiler.ast import From
from compiler.ast import Import
from config.config import CONFIG
repoLocation = CONFIG.repo_path
examplesLocation = CONFIG.urls_path
nameRegex = re.compile('([^/]*)(\\.git$)')
fileRegex = re.compile('.*\/(.*)')
def repo_already_exists(filepath):
return os.path.exists(filepath);
"""
Converts a github url (e.g. https://github.com/sbenthall/bigbang.git) to
a human-readable name (bigbang) by looking at the word between the last "/" and ".git".
"""
def url_to_name(url):
url = url.replace("\n", "");
name = nameRegex.search(url).group(1);
return name;
"""
Converts a name of a repo to its filepath.
Currently, these go to ../archives/sample_git_repos/{name}/
"""
def name_to_filepath(name):
newLoc = repoLocation + name
return newLoc
"""
Converts a filepath (../archives/sample_git_repos/{name}) to a name.
Note that this will fail if the filepath ends in a "/". It must end
in the name of the folder.
Thus, it should be ../archives/sample_git_repos/{name} not
../archives/sample_git_repos/{name}/
"""
def filepath_to_name(filepath):
name = fileRegex.search(filepath).group(1);
return name;
"""
Converts a dictionary of dependencies into a NetworkX DiGraph.
"""
def create_graph(dic):
G = nx.DiGraph()
for f in dic:
for dependency in dic[f]:
G.add_edge(f, dependency)
return G
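# e.g. create_graph({'analysis.py': ['os', 'utils/helpers']}) yields the directed
# edges analysis.py -> os and analysis.py -> utils/helpers (file names here are
# made up for illustration).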
"""
Returns a list of the Python files in a directory, and
converts IPython notebooks into Python source code and
includes them with the Python files.
"""
def get_files(filepath):
os.chdir(filepath)
files = []
for root, dirnames, filenames in os.walk("."):
for filename in fnmatch.filter(filenames, '*.py'):
files.append(os.path.join(root, filename))
for filename in fnmatch.filter(filenames, '*.ipynb'):
try:
with open(os.path.join(root, filename)) as fh:
nb = nbformat.reads_json(fh.read())
export_path = filename.replace(".ipynb", ".py")
exporter = PythonExporter()
source, meta = exporter.from_notebook_node(nb)
with open(export_path, 'w+') as fh:
fh.writelines(source)
files.append(export_path)
except: #may have issues with JSON encoding
pass
return files
"""
Given a directory, collects all Python and IPython files and
uses the Python AST to create a dictionary of dependencies from them.
Returns the dependencies converted into a NetworkX graph.
"""
def get_dependency_network(filepath):
files = get_files(filepath)
dependencies = {}
for file in set(files):
ast = compiler.parseFile(file)
for node in ast.getChildren()[1].nodes:
if isinstance(node, Import):
if file in dependencies:
dependencies[file].append(node.names[0][0])
else:
dependencies[file] = [node.names[0][0]]
elif isinstance(node, From):
if file in dependencies:
dependencies[file].append(node.modname + "/" + node.names[0][0])
return create_graph(dependencies)
"""
Takes three different options for type:
'remote' : basically a git url
'name' (default): a name like 'scipy' which the method can expand to a url
'local' : a filepath to a file on the local system (basically an existing git directory on this computer)
This returns an initialized GitRepo object with its data and name already loaded.
"""
def get_repo(repo_in, in_type='name', update = False):
# Input is name
if in_type == 'name':
filepath = name_to_filepath(repo_in)
ans = None;
if not update:
ans = get_cache(repo_in);
if ans is not None:
return ans;
print("Checking for " + str(repo_in) + " at " + str(filepath));
ans = get_repo(filepath, 'local', update);
if isinstance(ans, GitRepo):
ans.commit_data.to_csv(cache_path(repo_in), sep='\t', encoding='utf-8') # We cache it hopefully???
else:
print("We failed to find a local copy of this repo")
return ans;
# Input is a local file
if in_type == 'local':
if repo_already_exists(repo_in):
name = filepath_to_name(repo_in);
return GitRepo(url=repo_in, name=name);
else:
print("Invalid filepath: " + repo_in);
return None;
if in_type == 'remote':
name = url_to_name(repo_in);
filepath = name_to_filepath(name);
if not repo_already_exists(filepath):
print("Gloning the repo from remote")
fetch_repo(repo_in);
return get_repo(name, 'name', update);
else:
print("Invalid input") # TODO: Clarify this error
return None
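# Usage sketch for the three in_type modes described above (illustrative; paths and
# URLs follow the conventions documented in this module):
#   get_repo('bigbang', in_type='name')
#   get_repo(repoLocation + 'bigbang', in_type='local')
#   get_repo('https://github.com/sbenthall/bigbang.git', in_type='remote')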
"""
Takes in a git url and uses shell commands
to clone the git repo into sample_git_repos/
TODO: We shouldn't use this with shell=True because of security concerns.
"""
def fetch_repo(url):
# TODO: We are repeatedly calculating name and filepath
url = url.replace("\n", "");
name = url_to_name(url);
newLoc = name_to_filepath(name);
command = ["git " + "clone " + url + " " + newLoc];
subprocess.call(command, shell = True);
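# A safer variant of the call above (sketch addressing the TODO) passes the
# arguments as a list so no shell is involved:
#   subprocess.call(['git', 'clone', url, newLoc])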
"""
Takes in a name (bigbang)
Returns where its cached file should be (../sample_git_repos/bigbang_backup.csv)
"""
def cache_path(name):
return repoLocation + str(name) + "_backup.csv"
"""
Takes in a name (bigbang)
Returns a GitRepo object containing the cache data if the cache exists
Returns None otherwise.
"""
def get_cache(name):
filepath = cache_path(name);
if os.path.exists(filepath):
c = pd.read_csv(filepath, sep='\t', encoding='utf-8');
fp = name_to_filepath(name);
ans = GitRepo(name=name, url=fp, cache=c);
return ans;
return None;
"""
As of now, this only accepts names/repos, not local urls
TODO: This could be optimized
"""
def get_multi_repo(repo_names=None, repos=None):
if repos is None:
repos = list()
for name in repo_names:
repo = get_repo(name, in_type = "name")
repos.append(repo);
for repo in repos:
repo.commit_data["Repo Name"] = repo.name;
ans = MultiGitRepo(repos);
return ans
"""
fetches a list of all repos in an organization from github
and gathers their URL's (of the form *.git)
It dumps these into ../examples/{org_name}_urls.txt
"""
def load_org_repos(org_name):
github_url = "https://api.github.com/orgs/" + org_name + "/repos"
r = requests.get(github_url)
data = r.json()
urls = []
for repo in data:
if "git_url" in repo:
urls.append(repo["git_url"])
if len(urls) == 0:
print("Found no repos in group: " + str(org_name))
return None
else:
addr = examplesLocation + str(org_name) + "_urls.txt"
f = open(addr, 'w')
f.write("\n".join(urls))
print("Wrote git urls to " + addr)
return urls
"""
Checks to see if we have the urls for a given org
If we don't, it fetches them.
Once we do, it returns a list of GitRepo objects from the urls.
"""
def get_org_repos(org_name):
addr = examplesLocation + str(org_name) + "_urls.txt"
urls = None
if not os.path.isfile(addr):
urls = load_org_repos(org_name);
else:
urls = open(addr, "r")
ans = list()
for url in urls:
ans.append(get_repo(url, "remote"))
return ans;
def get_org_multirepo(org_name):
repos = get_org_repos(org_name)
ans = get_multi_repo(repos=repos)
return ans
| gpl-2.0 |
adit-chandra/tensorflow | tensorflow/python/client/notebook.py | 61 | 4779 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see something like below, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = (
[sys.argv[0]] + [x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
nav13n/Data-Science-45min-Intros | support-vector-machines-101/rbf-circles.py | 26 | 1504 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__="Josh Montague"
__license__="MIT License"
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
try:
import seaborn as sns
except ImportError as e:
sys.stderr.write("seaborn not installed. Using default matplotlib templates.")
from sklearn.svm import SVC
from sklearn.datasets import make_circles
# adapted from:
# http://scikit-learn.org/stable/auto_examples/svm/plot_svm_kernels.html
# http://scikit-learn.org/stable/auto_examples/decomposition/plot_kernel_pca.html
xx, yy = make_circles(n_samples=500, factor=0.1, noise=0.15)
clf = SVC(kernel='rbf')
clf.fit(xx, yy)
plt.figure(figsize=(8,6))
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
facecolors='none', zorder=10, s=300)
plt.scatter(xx[:, 0], xx[:, 1], c=yy, zorder=10, cmap=plt.cm.Paired, s=100)
#plt.scatter(xx[:, 0], xx[:, 1], c=yy, zorder=10, s=100)
plt.axis('tight')
x_min = -1.5
x_max = 1.5
y_min = -1.5
y_max = 1.5
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
#plt.figure(fignum, figsize=(4, 3))
#plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.pcolormesh(XX, YY, Z > 0, alpha=0.1)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.title('rbf kernel')
plt.show()
| unlicense |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/io/json/test_normalize.py | 14 | 11514 | import pytest
import numpy as np
import json
import pandas.util.testing as tm
from pandas import compat, Index, DataFrame
from pandas.io.json import json_normalize
from pandas.io.json.normalize import nested_to_record
@pytest.fixture
def deep_nested():
# deeply nested data
return [{'country': 'USA',
'states': [{'name': 'California',
'cities': [{'name': 'San Francisco',
'pop': 12345},
{'name': 'Los Angeles',
'pop': 12346}]
},
{'name': 'Ohio',
'cities': [{'name': 'Columbus',
'pop': 1234},
{'name': 'Cleveland',
'pop': 1236}]}
]
},
{'country': 'Germany',
'states': [{'name': 'Bayern',
'cities': [{'name': 'Munich', 'pop': 12347}]
},
{'name': 'Nordrhein-Westfalen',
'cities': [{'name': 'Duesseldorf', 'pop': 1238},
{'name': 'Koeln', 'pop': 1239}]}
]
}
]
@pytest.fixture
def state_data():
return [
{'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}],
'info': {'governor': 'Rick Scott'},
'shortname': 'FL',
'state': 'Florida'},
{'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}],
'info': {'governor': 'John Kasich'},
'shortname': 'OH',
'state': 'Ohio'}]
class TestJSONNormalize(object):
def test_simple_records(self):
recs = [{'a': 1, 'b': 2, 'c': 3},
{'a': 4, 'b': 5, 'c': 6},
{'a': 7, 'b': 8, 'c': 9},
{'a': 10, 'b': 11, 'c': 12}]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self, state_data):
result = json_normalize(state_data[0], 'counties')
expected = DataFrame(state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties')
expected = []
for rec in state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties', meta='state')
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_empty_array(self):
result = json_normalize([])
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
# GH 14883
result = json_normalize({'A': {'A': 1, 'B': 2}})
expected = DataFrame([[1, 2]], columns=['A.A', 'A.B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({'A': {'A': 1, 'B': 2}}, sep='_')
expected = DataFrame([[1, 2]], columns=['A_A', 'A_B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({'A': {'A': 1, 'B': 2}}, sep=u'\u03c3')
expected = DataFrame([[1, 2]], columns=[u'A\u03c3A', u'A\u03c3B'])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize(deep_nested, ['states', 'cities'],
meta=['country', ['states', 'name']],
sep='_')
expected = Index(['name', 'pop',
'country', 'states_name']).sort_values()
assert result.columns.sort_values().equals(expected)
def test_more_deeply_nested(self, deep_nested):
result = json_normalize(deep_nested, ['states', 'cities'],
meta=['country', ['states', 'name']])
# meta_prefix={'states': 'state_'})
ex_data = {'country': ['USA'] * 4 + ['Germany'] * 3,
'states.name': ['California', 'California', 'Ohio', 'Ohio',
'Bayern', 'Nordrhein-Westfalen',
'Nordrhein-Westfalen'],
'name': ['San Francisco', 'Los Angeles', 'Columbus',
'Cleveland', 'Munich', 'Duesseldorf', 'Koeln'],
'pop': [12345, 12346, 1234, 1236, 12347, 1238, 1239]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [{'state': 'Florida',
'shortname': 'FL',
'info': {
'governor': 'Rick Scott'
},
'counties': [{'name': 'Dade', 'population': 12345},
{'name': 'Broward', 'population': 40000},
{'name': 'Palm Beach', 'population': 60000}]},
{'state': 'Ohio',
'shortname': 'OH',
'info': {
'governor': 'John Kasich'
},
'counties': [{'name': 'Summit', 'population': 1234},
{'name': 'Cuyahoga', 'population': 1337}]}]
result = json_normalize(data, 'counties',
['state', 'shortname',
['info', 'governor']])
ex_data = {'name': ['Dade', 'Broward', 'Palm Beach', 'Summit',
'Cuyahoga'],
'state': ['Florida'] * 3 + ['Ohio'] * 2,
'shortname': ['FL', 'FL', 'FL', 'OH', 'OH'],
'info.governor': ['Rick Scott'] * 3 + ['John Kasich'] * 2,
'population': [12345, 40000, 60000, 1234, 1337]}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [{'foo': 'hello',
'bar': 'there',
'data': [{'foo': 'something', 'bar': 'else'},
{'foo': 'something2', 'bar': 'else2'}]}]
with pytest.raises(ValueError):
json_normalize(data, 'data', meta=['foo', 'bar'])
result = json_normalize(data, 'data', meta=['foo', 'bar'],
meta_prefix='meta')
for val in ['metafoo', 'metabar', 'foo', 'bar']:
assert val in result
def test_record_prefix(self, state_data):
result = json_normalize(state_data[0], 'counties')
expected = DataFrame(state_data[0]['counties'])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, 'counties',
meta='state',
record_prefix='county_')
expected = []
for rec in state_data:
expected.extend(rec['counties'])
expected = DataFrame(expected)
expected = expected.rename(columns=lambda x: 'county_' + x)
expected['state'] = np.array(['Florida', 'Ohio']).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_non_ascii_key(self):
if compat.PY3:
testjson = (
b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},' +
b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'
).decode('utf8')
else:
testjson = ('[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'
'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]')
testdata = {
u'sub.A': [1, 3],
u'sub.B': [2, 4],
b"\xc3\x9cnic\xc3\xb8de".decode('utf8'): [0, 1]
}
expected = DataFrame(testdata)
result = json_normalize(json.loads(testjson))
tm.assert_frame_equal(result, expected)
class TestNestedToRecord(object):
def test_flat_stays_flat(self):
recs = [dict(flat1=1, flat2=2),
dict(flat1=3, flat2=4),
]
result = nested_to_record(recs)
expected = recs
assert result == expected
def test_one_level_deep_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1}
assert result == expected
def test_nested_flattens(self):
data = dict(flat1=1,
dict1=dict(c=1, d=2),
nested=dict(e=dict(c=1, d=2),
d=2))
result = nested_to_record(data)
expected = {'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
assert result == expected
def test_json_normalize_errors(self):
# GH14583: If meta keys are not always present
# a new option to set errors='ignore' has been implemented
i = {
"Trades": [{
"general": {
"tradeid": 100,
"trade_version": 1,
"stocks": [{
"symbol": "AAPL",
"name": "Apple",
"price": "0"
}, {
"symbol": "GOOG",
"name": "Google",
"price": "0"
}
]
}
}, {
"general": {
"tradeid": 100,
"stocks": [{
"symbol": "AAPL",
"name": "Apple",
"price": "0"
}, {
"symbol": "GOOG",
"name": "Google",
"price": "0"
}
]
}
}
]
}
j = json_normalize(data=i['Trades'],
record_path=[['general', 'stocks']],
meta=[['general', 'tradeid'],
['general', 'trade_version']],
errors='ignore')
expected = {'general.trade_version': {0: 1.0, 1: 1.0, 2: '', 3: ''},
'general.tradeid': {0: 100, 1: 100, 2: 100, 3: 100},
'name': {0: 'Apple', 1: 'Google', 2: 'Apple', 3: 'Google'},
'price': {0: '0', 1: '0', 2: '0', 3: '0'},
'symbol': {0: 'AAPL', 1: 'GOOG', 2: 'AAPL', 3: 'GOOG'}}
assert j.fillna('').to_dict() == expected
pytest.raises(KeyError,
json_normalize, data=i['Trades'],
record_path=[['general', 'stocks']],
meta=[['general', 'tradeid'],
['general', 'trade_version']],
errors='raise'
)
| mit |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Dynamic_Shear_Behaviour/Frictional_SDOF_With_Damping/c_t_10/NonLinHardShear/Displacement_Response.py | 12 | 2048 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 28})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=24
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=24
###############################################################
## Analytical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Analytical_Displacement.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][24,:]
# Plot the figure. Add labels and titles.
plt.figure(figsize=(12,10))
plt.plot(times, disp, '-r', label='Analytical Solution', linewidth=4)
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
###############################################################
## Numerical Solution
###############################################################
# Go over each feioutput and plot each one.
thefile = "Frictional_SDOF_freeVibration.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][24,:]
# Plot the figure. Add labels and titles.
plt.plot(times, disp, '-k', label='Numerical Solution', linewidth=4)
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
########################################################
# # axes = plt.gca()
# # axes.set_xlim([-7,7])
# # axes.set_ylim([-1,1])
outfigname = "Displacement_Response.pdf";
legend = plt.legend()
legend.get_frame().set_linewidth(0.0)
legend.get_frame().set_facecolor('none')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
walterreade/scikit-learn | examples/text/document_clustering.py | 42 | 8335 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
  - TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the
    most frequent words to feature indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
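For example, such a pipelining step can be written as follows (a minimal
sketch shown for illustration only; the command-line script below builds the
same kind of pipeline from its options):
    from sklearn.feature_extraction.text import HashingVectorizer
    from sklearn.feature_extraction.text import TfidfTransformer
    from sklearn.pipeline import make_pipeline
    hashing_tfidf = make_pipeline(HashingVectorizer(n_features=2 ** 18),
                                  TfidfTransformer())
    X = hashing_tfidf.fit_transform(["first document", "second document"])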
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high-dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: because they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
raymond91125/tissue_enrichment_tool_hypergeometric_test | pea_paper_docs/src/dictionary_generator.py | 4 | 21443 | """Contains classes solr_query, node and sisters."""
# -*- coding: utf-8 -*-
from urllib.request import urlopen
import simplejson
import json
import numpy as np
import pandas as pd
import contextlib
# import copy
class solr_query():
"""
A solr_query class that stores URLs.
    Attributes:
    solr_url -- the base solr URL
    query -- the query string appended to solr_url
"""
def __init__(self, solr_url, query):
"""Initialize the solr_query object."""
self.solr_url = solr_url
self.query = query
def set_solr_url(self, url):
"""Assign a url to the solr_query object."""
self.solr_url = url
def add_query_url(self, url):
"""Add a query url to the solr_query object."""
self.query = url
def open_query(self, p=0, timeout=10):
"""
Given a query, append it to the main url.
Open URL and use simplejson to load the results
"""
timer = 10 # don't try more than 10x per website
if p:
print(self.solr_url + self.query)
while timer > 0:
try:
with contextlib.closing(urlopen(self.solr_url +
self.query, timeout=timeout)) as conn:
return simplejson.load(conn)
except:
# raise Warning('URL is invalid or may have timed out')
timer -= 1
pass
# raise an error if the timer reached 0
if timer == 0:
print(self.solr_url + self.query)
raise Warning('Url could not be contacted or is invalid')
class node():
"""
A node is intended to be a single ontology term.
Attributes:
name - wbbt id
parents
genes
    similarity - the number of genes associated with this node divided
    by the number of genes in its sister set's combined gene set
drop -- whether to drop or not
good_name -- human readable plus wbbt
QUERIES FOR RELATIONS AND GENES ARE LAMBDA FUNCTIONS
query_relation(x) -- gets families of tissue x
query_genes(x) -- gets genes assoc with x
query_readable
"""
def __init__(self, name):
"""Initialization function."""
self.name = name
self.daughters = []
self.parents = []
self.genes = []
self.similarity = 0
self.drop = False
self.good_name = ''
def get_name(self, human_readable):
"""Generate a good name (human readable + WBid)."""
if human_readable == '':
print('warning, empty human readable name')
self.good_name = human_readable + ' ' + self.name
def add_daughter(self, daughter):
"""Add a daughter to this node."""
self.daughters.append(daughter)
self.daughters = list(set(self.daughters)) # prevent redundancy
def add_parent(self, parent):
"""Add a parent to this node."""
self.parents.append(parent)
self.parents = list(set(self.parents))
def add_annotation(self, gene):
"""Add annotation to this node."""
self.genes.append(gene)
self.genes = list(set(self.genes))
def throw_away(self):
"""Set the `drop` variable to True."""
self.drop = True
def calc_similarity(self, sim):
"""Calculate similarity."""
self.similarity = sim
def find_family(self, solr_url, query_relation, p=0):
"""
Find the family for this node by using solr_url and query_relation.
query_relation(x) --lambda function
"""
# get the json object
rsp_rlshp = solr_query(solr_url, query_relation(self.name))
# extract the array with all the right information
array_of_rlshps = rsp_rlshp.open_query(p=p)['response']['docs'][0]
# go through the array, turning each line into a dictionary
# these mini-dictionaries contain the edges between nodes
for j in json.loads(array_of_rlshps['topology_graph_json'])['edges']:
# if the object isnt the same as the wbbt, object is parent to wbbt
# if object is same as wbbt, wbbt is parent to subject
if self.name != j['obj']:
self.add_parent(j['obj'])
else:
self.add_daughter(j['sub'])
def find_genes(self, solr_url, query_genes):
"""
For a given wbbt, find the genes associated with it.
query_genes(x) -- lambda function!
"""
rsp_genes = solr_query(solr_url, query_genes(self.name))
# extract the array with all the right information
array_of_genes = rsp_genes.open_query()['response']['docs']
# go through the array, turning each line into a dictionary
for entry in array_of_genes:
self.genes.append(entry['id'][3:]) # remove WB: from the string
self.genes = list(set(self.genes))
class sisters(object):
"""
A sister object that contains related terms.
A sister object that is meant to contain a set of terms that are related
Sisters are defined as a set of nodes that share a single parent
If a node is multiparent, it can have as many different sister sets as
parents.
Attributes:
parent -- the parent for this set
sisters -- set of `node` objects that are related by the same parent
geneset -- total set of genes associated with these sisters
    threshold -- similarity value above which the sisters are dropped
dropsisters -- boolean
dropped -- an array that keeps track of all sisters ever dropped
"""
def __init__(self, parent, threshold):
"""Initialize function."""
self.parent = parent
self.sisters = []
self.geneset = []
self.threshold = threshold
self.dropsisters = 0
self.dropped = []
def add_sister(self, sister):
"""Add a sister."""
if self.sisters:
self.sisters.append(sister)
else:
self.sisters = [sister]
self.geneset = list(set(self.geneset+(sister.genes)))
def add_sisters(self, sisters):
"""Add multiple sisters."""
self.sisters = list(set(self.sisters+sisters))
for sister in sisters:
self.geneset = self.geneset+sister.genes
self.geneset = list(set(self.geneset))
def add_dropped(self, sister):
"""Add a sister to the `dropped` list."""
        if not isinstance(sister, list):
self.dropped.append(sister)
else:
self.dropped = self.dropped+sister
def calc_similarity(self, method):
"""
        Calculate the family-wise similarity for this object.
        Each sister's similarity is the cardinality of its own gene set divided
        by the cardinality of the combined gene set of all sisters.  Depending
        on the method ('any' or 'avg'), sisters.dropsisters is set to 1 when
        the sisters are too similar to each other.  (A worked toy example,
        _sisters_similarity_example, follows this class definition.)
"""
if len(self.sisters) == 0:
return 0
        if len(self.geneset) == 0:
return 1
if method not in ['avg', 'any']:
raise ValueError('method must be one of \'avg\' or \'any\'')
avg = 0
for sister in self.sisters:
sim = len(sister.genes)/len(self.geneset)
sister.calc_similarity(sim)
if method == 'any':
if sim > self.threshold:
self.dropsisters = 1
avg += sim
avg = avg/len(self.sisters)
if method == 'avg':
if avg > self.threshold:
self.dropsisters = 1
def kill(self):
"""If dropsister variable is 1, set `dropped` = 'sisters'."""
if self.dropsisters == 1:
self.dropped = self.sisters
def trim(self, val):
"""If sister doesn't have `val` genes assoc. with it, drop it."""
if len(self.sisters) == 0:
return
for sister in self.sisters:
if len(sister.genes) < val:
self.dropped.append(sister)
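# A minimal, self-contained sketch (not used by the pipeline in this module)
# showing how `sisters.calc_similarity` scores a family: each sister's
# similarity is len(sister.genes) / len(union of all sisters' genes).  The
# term names and gene lists below are invented purely for illustration.
def _sisters_similarity_example():
    """Toy demonstration of the family-wise similarity calculation."""
    a = node('WBbt:demoA')
    a.genes = ['g1', 'g2', 'g3']
    b = node('WBbt:demoB')
    b.genes = ['g1', 'g2']
    family = sisters('WBbt:demoParent', threshold=0.9)
    family.add_sisters([a, b])
    family.calc_similarity('any')
    # a covers 3/3 of the union (similarity 1.0), which exceeds the 0.9
    # threshold, so the whole sister set is flagged to be dropped.
    return a.similarity, b.similarity, family.dropsisters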
class ontology():
"""An ontological object."""
def __init__(self, name, cutoff, threshold, method, solr_url):
"""Initialization function."""
self.name = name
self.threshold = threshold
self.method = method
self.nodes = {}
self.family = {}
self.solr_url = solr_url
self.query_min_cutoff = 5
self.cutoff = cutoff
self.dropped = {}
self.good = {}
def set_min_cutoff(self, x):
"""Set minimum gene cutoff below which nodes are not fetched."""
self.query_min_cutoff = x
def add_nodes(self, query_terms, query_readable):
"""Add nodes from solr database."""
sq = solr_query(self.solr_url, query_terms(self.query_min_cutoff))
rsp_terms = sq.open_query()
sd = solr_query(self.solr_url, query_readable)
rsp_read = sd.open_query()
i = 0
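        # Solr returns facet counts as a flat list [term, count, term, count,
        # ...], so every even-indexed entry is an ontology term id and every
        # odd-indexed entry is its annotation count.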
for k in enumerate(rsp_terms['facet_counts']
['facet_fields']['regulates_closure']):
if i % 2 == 0:
n = node(k[1])
if n.name not in self.nodes:
self.nodes[n.name] = n
self.nodes[n.name].get_name(query_readable)
if n.name not in self.family:
self.family[n.name] = sisters(n.name, self.threshold)
i += 1
for k, val in enumerate(rsp_read['response']['docs']):
if val['id'] not in self.nodes:
continue
self.nodes[val['id']].get_name(val['annotation_class_label'])
def find_node_family(self, lambda_query_rlshp, p=0):
"""Find the nodes that are related to this one."""
for n in iter(self.nodes):
self.nodes[n].find_family(self.solr_url, lambda_query_rlshp, p=p)
def find_node_annotations(self, lambda_query_genes):
"""Fetch the annotations for this node."""
for n in iter(self.nodes):
self.nodes[n].find_genes(self.solr_url, lambda_query_genes)
if len(self.nodes[n].genes) < self.cutoff:
self.dropped[self.nodes[n].name] = self.nodes[n]
def annotate_nodes(self, lambda_query_rlshp, lambda_query_genes):
"""Annotate this node with a family and with annotations."""
self.find_node_family(lambda_query_rlshp)
self.find_node_annotations(lambda_query_genes)
def find_families(self):
"""Figure out the family structure for each node."""
for node in self.nodes:
n = self.nodes[node]
for daughter in n.daughters:
if daughter not in self.nodes:
continue
# if 'WBbt:0002367' == daughter:
# print('hi')
if len(self.nodes[daughter].genes) < self.threshold:
# add sister
self.family[n.name].add_sister(self.nodes[daughter])
# place it in sister.dropped
self.family[n.name].add_dropped(self.nodes[daughter])
# but also in self.dropped
self.dropped[n.name] = n
else:
self.family[n.name].add_sister(self.nodes[daughter])
def calculate_similarities(self):
"""Calculate the family-wise similarity."""
for parent in self.family:
self.family[parent].calc_similarity(self.method)
def kill(self):
"""Remove whatever nodes fulfill the sisters.kill criterion."""
for parent in self.family:
self.family[parent].kill()
for killed in self.family[parent].dropped:
if killed.name in self.nodes:
self.dropped[killed.name] = killed
def ceiling(self):
"""If a node has all its complement of daughters, kill it."""
for parent in self.family:
if parent not in self.nodes:
continue
if len(self.family[parent].sisters) == 0:
continue
if len(self.family[parent].dropped) == 0:
self.dropped[self.nodes[parent].name] = self.nodes[parent]
def find_good(self):
"""Fetch the surviving nodes."""
for node in self.nodes:
if node not in self.dropped:
self.good[self.nodes[node].good_name] = self.nodes[node]
def build_dictionary(wbbts, tissue_array, genes):
"""Build the dictionary from a list of terms and wbbts."""
# given a list of tissues, find the genes associated with each tissue and
# place them in a vector.....
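    # The result is a genes-by-tissues indicator matrix wrapped in a DataFrame:
    # mat[i, j] == 1 when gene i is annotated to tissue j, plus a leading
    # 'wbid' column holding the gene identifiers.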
mat = np.zeros(shape=(len(genes), len(wbbts)))
d = {}
for i, gene in enumerate(genes):
d[gene] = i
# for j, tissue in enumerate(wbbts):
# if gene in wbbts[tissue].genes:
# mat[i, j] = 1
for j, tissue in enumerate(wbbts):
for gene in wbbts[tissue].genes:
mat[d[gene], j] = 1
cols = tissue_array
df = pd.DataFrame(mat, columns=cols)
df.insert(0, 'wbid', genes)
# drop the root term, for some reason it causes problems with hgt
if 'C. elegans Cell and Anatomy WBbt:0000100' in df.columns:
df.drop('C. elegans Cell and Anatomy WBbt:0000100', axis=1,
inplace=True)
return df
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# # # # # # # #
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
if __name__ == '__main__':
# Raymond:
# I have split up the URLs into 2 different variables to make life easier
# solr_url contains the first part
# query_xxx contains the second. However, query_xx can be a lambda function
# basically, at a point in the string, I have written something like...
# 'select?qt=standard&indent={0}'.format(x) -- the {0} is replaced by x
# this allows me to modify the query in predictable ways.
# hope this is clear.
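    # A toy illustration of that pattern (the fragment below is made up and is
    # never sent anywhere; it only shows how the {0} placeholder is filled in):
    #     example_query = lambda x: 'select?qt=standard&facet.mincount={0}'.format(x)
    #     example_query(5)  ->  'select?qt=standard&facet.mincount=5'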
import argparse
import sys
parser = argparse.ArgumentParser(description='Run Dictionary Maker')
parser.add_argument("threshold", help='The redundancy threshold',
type=float)
parser.add_argument('cutoff', help='The annotation cutoff for each term',
type=int)
parser.add_argument("fname",
help='Filename (complete with path) to save to',
type=str)
parser.add_argument("-m", '--method',
help='method - defaults to \'any\' if not specified',
type=str)
parser.add_argument("-mc", '--mincutoff',
help='The minimum cutoff to fetch. Defaults to 2.',
type=int)
parser.add_argument("-su", '--solrurl',
help='The main body of the solr url.', type=str)
parser.add_argument("-o", "--ontology",
help='One of `phenotype`, `tissue` or `gene`. Only\
works if --solrurl has not been specified',
type=str, default='anatomy',
choices=['anatomy', 'phenotype', 'go'])
args = parser.parse_args()
# main solr url
if args.solrurl:
solr_url = args.solrurl
else:
# solr_url = 'http://wobr.caltech.edu:8082/solr/anatomy/'
s = 'http://wobr.caltech.edu:8082/solr/{0}/'
solr_url = s.format(args.ontology)
# queries must be lambda functions
# query for terms. Finds terms that have x or more annotating genes
def query_terms(x, ontology=args.ontology):
"""Search solr for terms (nodes) in the ontology."""
if ontology != 'go':
s = 'select?qt=standard&indent=on&wt=json&version=2.2&fl=' +\
'id&start=0&rows=0&q=document_category:bioentity' +\
'&facet=true&facet.field=regulates_closure&' +\
'facet.limit=-1&facet.mincount={0}&facet.sort' +\
'=count&fq=source:%22WB%22&fq=-qualifier:%22not%22'
else:
s = 'select?qt=standard&indent=on&wt=json&version=2.2&fl=' +\
'id&start=0&rows=1&q=document_category:bioentity&facet=' +\
'true&facet.field=regulates_closure&facet.limit=-1&' +\
'facet.mincount={0}&facet.sort=count&fq=source:%22WB' +\
'%22&fq=taxon:%22NCBITaxon:6239%22&fq=-qualifier:%22not%22'
return s.format(x)
def query_relation(x, ontology=args.ontology):
"""
query for relationships between nodes.
given a wbbt ID `x`, find the nodes connected to it.
Links are slightly different for [anatomy, phenotype] and GO, because
in WormBase, the GO solr database includes all other worm species as
well.
"""
if ontology != 'go':
s = "select?qt=standard&fl=topology_graph_json&" +\
"version=2.2&wt=json&indent=on&rows=1&q=id:" +\
"%22{0}%22&fq=document_category:%22ontology_class%22"
else:
s = "select?qt=standard&fl=topology_graph_json&" +\
"version=2.2&wt=json&indent=on&rows=1&q=id:" +\
"%22{0}%22&fq=document_category:%22ontology_class%22"
return s.format(x)
def query_genes(x, ontology=args.ontology):
"""
find the genes associated with every node.
given a wbbt ID `x`, open URL that contains genes assoc. with it.
"""
if ontology != 'go':
s = "select?qt=standard&indent=on&wt=json&version=2.2&" +\
"fl=id&start=0&rows=10000&q=document_category:bioentity" +\
"&fq=source:%22WB%22&fq=-qualifier:%22not%22&" +\
"fq=regulates_closure:%22{0}%22"
else:
s = "select?qt=standard&indent=on&wt=json&version=2.2&" +\
"fl=id&start=0&rows=10000&q=document_category:bioentity" +\
"&fq=source:%22WB%22&fq=taxon:%22NCBITaxon:6239%22" +\
"&fq=-qualifier:%22not%22&" +\
"fq=regulates_closure:%22{0}%22"
return s.format(x)
# query for readable names
query_readable = "select?qt=standard&fl=id,annotation_class_label" +\
"&version=2.2&wt=json&indent=on&rows=100000&q=id:" +\
"*&fq=document_category:ontology_class&" +\
"fq=-is_obsolete:true"
queries = [query_terms, query_relation, query_genes, query_readable]
threshold = args.threshold
cutoff = args.cutoff
if args.method:
method = args.method
else:
method = 'any'
if args.mincutoff:
min_annot = args.mincutoff
else:
min_annot = 2
trial1 = ontology('tissue_ontology', cutoff, threshold, method, solr_url)
print('Object made')
print('Min cutoff set at: {0}....'.format(min_annot))
sys.stdout.flush()
trial1.set_min_cutoff(min_annot)
print('Fetching nodes.....')
sys.stdout.flush()
trial1.add_nodes(query_terms, query_readable)
print('Annotating nodes')
sys.stdout.flush()
trial1.find_node_annotations(query_genes)
print('Finding node families...')
sys.stdout.flush()
trial1.find_node_family(query_relation)
print('Generating node family representation...')
sys.stdout.flush()
trial1.find_families()
message = 'Calculating similarities and \
removing nodes with more than {0:.2} similarity...'
print(message.format(threshold))
sys.stdout.flush()
trial1.calculate_similarities()
message = 'killing nodes that have less than {0} annotations...'
print(message.format(cutoff))
sys.stdout.flush()
trial1.kill()
print('Applying ceiling...')
sys.stdout.flush()
trial1.ceiling()
print('Generating final list of terms...')
trial1.find_good()
print('No. of terms in dictionary: {0}'.format(len(trial1.good)))
# extract keys
print('Generating file at {0}'.format(args.fname))
tissues = []
genes = []
for n in trial1.good:
tissues.append(n)
# print(n)
genes = genes+trial1.good[n].genes
genes = list(set(genes))
df = build_dictionary(trial1.good, tissues, genes)
df.to_csv(args.fname, index=False)
| mit |
mdaal/KAM | Quantum_Model_2.py | 1 | 15171 | import numpy as np
from matplotlib import pylab, mlab, pyplot
plt = pyplot
MHz = np.power(10,6)
# w_0 = 2*np.pi*f_0
# psi_1 = 0
# phi_r = np.pi / 2
# K = -1.0 * np.power(10,-4.0)*w_0
# gamma_nl = 0.01*K/np.sqrt(3)
# gamma_r = 0.01 * w_0
# gamma_l = 1.1 * gamma_r
# sigmap_12 = 0.5
# sigmap_13 = np.power(0.5,0.5)
# b1c_in = np.sqrt((4/(3*np.sqrt(3))) * np.power(sigmap_13*gamma_r,-1) * np.power((gamma_r+gamma_l)/(np.abs(K)-np.sqrt(3)*gamma_nl), 3) * (K*K + gamma_nl*gamma_nl)) #Threshold of bi-stability
# b1_in = .8* b1c_in
#computed values:
# psi_2, V3, phi_B, b2_out
Manual = False
Use_Run_45a = 1
if Use_Run_45a:
#fit, fig, ax = Run45aP.nonlinear_fit(Save_Fig = True, Indexing = (None,-1,None))
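    # NOTE: `fit` and `Run45aP` are assumed to already exist in the session
    # (e.g. produced by the nonlinear_fit call commented out above); this
    # branch raises a NameError if they have not been defined.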
bestfit = 'Powell'
eta = fit[bestfit].x[4] #
delta = fit[bestfit].x[5]#
f_0 = fit[bestfit].x[0] #
Qtl = fit[bestfit].x[1]#
Qc = fit[bestfit].x[2]#
phi31 = fit[bestfit].x[3]
Z1 = Run45aP.metadata.Feedline_Impedance
Z3 = Run45aP.metadata.Resonator_Impedance
V30 = np.sqrt(fit['V30V30'])
Pprobe_dBm = -54
#dBm
phiV1 = 0
if Manual:
f_0 = 700*MHz
delta =0.009 #freq nonlinearity
eta = 10#5.9 #Q nonlinearity
V30 = 0.1
Qtl = 100e3
Qc = 30e3
Z3 = 50.0
Z1 = 50.0
Pprobe_dBm = -65
phi31 = np.pi/2.05 #np.pi/2.015
phiV1 = 1*np.pi/10
Z2 = Z1
Pprobe = 0.001* np.power(10.0,Pprobe_dBm/10.0)
V1V1 = Pprobe *2*Z1
V1 = np.sqrt(V1V1) * np.exp(np.complex(0,1)*phiV1)
V30V30 = np.square(np.abs(V30))
Q = 1.0/ ((1.0/Qtl) + (1.0/Qc))
#tau = 50e-9 #cable delay - only affect V3, but not V2
################################# Create f array making sure it contains f_0
numBW = 20
BW = numBW*f_0/Q # 2*(f_0 * 0.25)
num = 3000
if 1: #Triangular numbers
T = np.linspace(1, num, num=num, endpoint=True, retstep=False, dtype=None)
T = T*(T+1.0)/2.0
f_plus = (T*(BW/2)/T[-1]) + f_0
f_minus = (-T[::-1]/T[-1])*(BW/2) + f_0
f = np.hstack((f_minus,f_0,f_plus))
if 0: #linear
f_plus = np.linspace(f_0, f_0 + BW/2, num=num, endpoint=True, retstep=False, dtype=None)
f_minus = np.linspace(f_0 - BW/2,f_0, num=num-1, endpoint=False, retstep=False, dtype=None)
f = np.hstack((f_minus,f_plus))
if 0: #logerithmic
f_plus = np.logspace(np.log10(f_0), np.log10(f_0 + BW/2), num=num, endpoint=True, dtype=None)
f_minus = -f_plus[:0:-1] + 2*f_0
f = np.hstack((f_minus,f_plus))
#################################
Number_of_Roots = 3
V3V3 = np.ma.empty((f.shape[0],Number_of_Roots), dtype = np.complex128)
V3 = np.ma.empty_like(V3V3)
exp_phi_V3 = np.ma.empty_like(V3V3)
V2_out = np.ma.empty_like(V3V3)
V3V3_up = np.empty_like(f)
V3_up = np.empty_like(f)
V2_out_up = np.empty(f.shape, dtype = np.complex128)
V3V3_down = np.empty(f.shape)
V3_down = np.empty_like(f)
V2_out_down = np.empty_like(f,dtype = np.complex128)
################ 3rd Version
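# For each drive frequency f[n], the steady-state cavity energy |V3|^2 is a
# root of the cubic assembled below; np.roots returns all three roots and the
# complex ones are masked out.  Where three real roots exist (the bistable
# region), the largest and smallest are the branches reached by sweeping the
# frequency downward and upward respectively, which is why the max/min of the
# unmasked roots are stored separately as V3V3_down and V3V3_up.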
for n in xrange(f.shape[0]):
coefs = np.array([np.square(delta * f[n]/V30V30 )+ np.square(eta*f_0/(2*Qtl*V30V30)), 2*(delta*(f[n]-f_0)*f[n]/V30V30 + eta*f_0*f_0/(4*Qtl*V30V30*Q) ),np.square(f[n]-f_0) + np.square(f_0/(2*Q)), -1.0*f_0*f_0*Z3*V1V1/(4*np.pi*Qc*Z1)])
V3V3[n] =np.ma.array(np.roots(coefs),mask= np.iscomplex(np.roots(coefs)),fill_value = 1)
V3[n] = np.ma.sqrt(V3V3[n])
# exp_phi_V3[n] is e^{i phi_V3} - no minus sign
exp_phi_V3[n] = f_0*np.exp(np.complex(0,1.0)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) *np.power( ( ((f_0/(2*Q)) + np.complex(0,1)*(f[n]-f_0)) * V3[n]) + ((eta*f_0/(2*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* V3V3[n]*V3[n],-1.0 )
# V2_out[n] is V_2^out * e^(-i phi_2)
V2_out[n] = V1*((1-np.exp(np.complex(0,2.0)*phi31))/2 +( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3[n]/V30V30) + np.complex(0,2)* (((f[n]-f_0)/f_0) + delta*(V3V3[n]/V30V30)*(f[n]/f_0))))*np.exp(np.complex(0,2.0)*phi31))
# calculate observed for upsweep and down sweep
# min |--> up sweep (like at UCB),
# max |--> down sweep
V3V3_down[n] = np.extract(~V3V3[n].mask,V3V3[n]).max().real
V3_down[n] = np.sqrt(V3V3_down[n])
V2_out_down[n] = V1*((1-np.exp(np.complex(0,2.0)*phi31))/2 +( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3_down[n]/V30V30) + np.complex(0,2)* (((f[n]-f_0)/f_0) + delta*(V3V3_down[n]/V30V30)*(f[n]/f_0))))*np.exp(np.complex(0,2.0)*phi31))
V3V3_up[n] = np.extract(~V3V3[n].mask,V3V3[n]).min().real
V3_up[n] = np.sqrt(V3V3_up[n])
V2_out_up[n] = V1*((1-np.exp(np.complex(0,2.0)*phi31))/2 +( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3_up[n]/V30V30) + np.complex(0,2)* (((f[n]-f_0)/f_0) + delta*(V3V3_up[n]/V30V30)*(f[n]/f_0))))*np.exp(np.complex(0,2.0)*phi31))
####################
# sV3V3_up = ((2.0*V1/Qc) * np.power(2*V2_out_up*np.exp(np.complex(0,-2.0)*phi31)+V1*(1-np.exp(np.complex(0,-2.0)*phi31)),-1) - (1.0/Qc) - (1.0/Qtl) - (np.complex(0,2.0) * (f-f_0)/f_0)) * V30V30 *np.power((eta/Qtl) + np.complex(0,2.0)*(delta*f/f_0),-1)
# sV3V3_up = sV3V3_up.real
# sV3_up = np.sqrt(sV3V3_up)
# residual1 = np.empty(sV3V3_up.shape,dtype = np.complex128)
# residual2 = np.empty(sV3V3_up.shape,dtype = np.complex128)
# residual3 = np.empty(sV3V3_up.shape,dtype = np.complex128)
# residual4 = np.empty(sV3V3_up.shape,dtype = np.complex128)
# for n in xrange(f.shape[0]):
# coefs = np.array([np.square(delta * f[n]/V30V30 )+ np.square(eta*f_0/(2*Qtl*V30V30)), 2*(delta*(f[n]-f_0)*f[n]/V30V30 + eta*f_0*f_0/(4*Qtl*V30V30*Q) ),np.square(f[n]-f_0) + np.square(f_0/(2*Q)), -1.0*f_0*f_0*Z3*V1V1/(4*np.pi*Qc*Z1)])
# residual1[n] = f_0*np.exp(np.complex(0,1)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) - (( ((f_0/(2.0*Q)) + np.complex(0,1.0)*(f[n]-f_0)) * sV3_up[n]) + ((eta*f_0/(2.0*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* sV3V3_up[n]*sV3_up[n])
# residual2[n] = f_0*np.exp(np.complex(0,1)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) - (( ((f_0/(2.0*Q)) + np.complex(0,1.0)*(f[n]-f_0)) * V3_up[n]) + ((eta*f_0/(2.0*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* V3V3_up[n]* V3_up[n])
# residual3[n] = np.polyval(coefs,sV3V3_up[n] ) # Exaluate the V3V3 qubic using the sV3V3_up synthesized from S21
# residual4[n] = np.polyval(coefs,V3V3_up[n] ) # Exaluate the V3V3 qubic using the V3V3_up computed from polynomial roots
# #if residual2 - residual3 = 0 then V3V3_up = sV3V3_up to high enough accuracy
# sumsq = np.square(residual2).sum()
# We use the solution to the cubic for one scan direction to construct the other two solutions
V2cubic = V2_out_down
S21 = V2cubic/V1
V3__ = np.empty_like(f)
V3__ = (S21 + (np.exp(np.complex(0,2.0)*phi31)-1)/2.)*V1*np.sqrt(Z3*Qc/(Z1*np.pi))* np.exp(np.complex(0,-1.0)*phi31)
z1 = eta/(Qtl*V30V30)+ np.complex(0,1.0)*(2*delta*f)/(V30V30*f_0)
z2 = (1.0/Qc) + (1/Qtl) + np.complex(0,2.0) *(f-f_0)/f_0
z1z2c = z1*z2.conjugate()
z1z1 = z1*z1.conjugate()
z2z2 = z2*z2.conjugate()
v1 = V3__*V3__.conjugate()
term1 = -(z1z2c.real/z1z1) - v1/2.0
term2 = np.complex(0,1)*np.sqrt(4*z1z2c.imag*z1z2c.imag + 3*v1*v1*z1z1*z1z1 + 4*z1z1*z1z2c.real*v1)/(2*z1z1)
v2 = term1 + term2
v3 = term1 - term2
V3p__ = np.sqrt(v2)
V3m__ = np.sqrt(v3)
S21p= ((1-np.exp(np.complex(0,2.0)*phi31))/2 +( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*v2/V30V30) + np.complex(0,2)* (((f-f_0)/f_0) + delta*(v2/V30V30)*(f/f_0))))*np.exp(np.complex(0,2.0)*phi31))
S21m = ((1-np.exp(np.complex(0,2.0)*phi31))/2 +( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*v3/V30V30) + np.complex(0,2)* (((f-f_0)/f_0) + delta*(v3/V30V30)*(f/f_0))))*np.exp(np.complex(0,2.0)*phi31))
#V3c__ = V3__.conjugate()
#f_0*np.exp(np.complex(0,1)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) - (( ((f_0/(2.0*Q)) + np.complex(0,1.0)*(f[n]-f_0)) * sV3_up[n]) + ((eta*f_0/(2.0*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* sV3V3_up[n]*sV3_up[n])
############### 2nd Version
# def roots(freq):
# coefs = [np.square(delta * freq/V30V30 )+ np.square(eta*f_0/(4*Qtl*V30V30)), 2*(delta*(freq-f_0)*freq/V30V30 + eta*f_0*f_0/(4*Qtl*V30V30*Q) ),np.square(freq-f_0) + np.square(f_0/(2*Q)), -1.0*f_0*f_0*Z3*V1V1/(4*np.pi*Qc*Z1)]
# return np.roots(coefs)
# for n in xrange(f.shape[0]):
# V3V3[n] = roots(f[n])
# V3[n] = np.sqrt(V3V3[n])
# test[n] = V3[n]
# # exp_phi_V3[n] is e^{i phi_V3} - no minus sign
# exp_phi_V3[n] = f_0*np.exp(np.complex(0,1)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) / ( ((f_0/(2*Q)) + np.complex(0,1)*(f[n]-f_0) *V3[n]) + ((eta*f_0/(2*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* V3V3[n]*V3[n] )
# # V2_out[n] is V_2^out * e^(-i phi_2)
# V2_out[n] = V1*(1 -( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3[n]/V30V30) + 2*np.complex(0,1)* (((f[n]-f_0)/f_0) + delta*(V3V3[n]/V30V30)*(f[n]/f_0)))))
# V3V3 = np.ma.masked_where(np.iscomplex(V3V3), V3V3, copy=True)
# V3V3.fill_value = 1
# V3.mask = V3V3.mask
# exp_phi_V3.mask = V3V3.mask
# V2_out.mask = V3V3.mask
# for n in xrange(f.shape[0]):
# #calculate observed upsweep values
# V3V3_up[n] = np.max(np.abs(V3V3[n].compressed()))
# V3_up[n] = np.sqrt(V3V3_up[n])
# V2_out_up[n] = V1*(1 -( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3_up[n]/V30V30) + 2*np.complex(0,1)* (((f[n]-f_0)/f_0) + delta*(V3V3_up[n]/V30V30)*(f[n]/f_0)))))
#################
# ################## 1st Version
# for n in xrange(f.shape[0]):
# V3V3[n] = roots(f[n])
# #Where are there 3 real solutions?
# # if np.isreal(V3V3[n,0]) & np.isreal(V3V3[n,1]) & np.isreal(V3V3[n,2]):
# # print(n)
# V3V3 = np.ma.masked_where(np.iscomplex(V3V3), V3V3, copy=True)
# V3 = np.sqrt(V3V3)
# exp_phi_V3 = np.ma.empty_like(V3)
# V2_out = np.ma.empty_like(V3)
# for n in xrange(f.shape[0]):
# # exp_phi_V3[n] is e^{i phi_V3} - no minus sign
# exp_phi_V3[n] = f_0*np.exp(np.complex(0,1)*phi31)*V1*np.sqrt(Z3)/(2*np.sqrt(np.pi*Qc*Z1)) / ( ((f_0/(2*Q)) + np.complex(0,1)*(f[n]-f_0) *V3[n]) + ((eta*f_0/(2*Qtl*V30V30)) + np.complex(0,1)*(delta * f[n]/V30V30))* V3V3[n]*V3[n] )
# # V2_out_phasor[n] is V_2^out * e^(-i phi_2)
# V2_out[n] = V1*(1 -( (1/Qc) / ((1/Qc) + (1/Qtl)*(1+eta*V3V3[n]/V30V30) + 2*np.complex(0,1)* (((f[n]-f_0)/f_0) + delta*(V3V3[n]/V30V30)*(f[n]/f_0)))))
# ##################
fig = plt.figure( figsize=(6, 6), dpi=150)
ax = {}
ax[1] = fig.add_subplot(2,2,1)
dff = (f - f_0)/f_0
trans = (V2_out/V1)
# dfff = np.array([dff,dff,dff]).transpose()
# dff = ma.array(dfff, mask = trans.mask)
# trans2 = trans.compressed()
# dff2 = dff.compressed()
trans_up = V2_out_up/V1
trans_down = (V2_out_down/V1)
transp=S21p[~np.iscomplex(V3p__)]
transm=S21m[~np.iscomplex(V3m__)]
curve = ax[1].plot(dff,20*np.log10(np.abs(trans)),color = 'g', linestyle = '-',linewidth = 2)
curve_up = ax[1].plot(dff,20*np.log10(np.abs(trans_up)), color = 'k', linestyle = ':', alpha = .35,linewidth = 1, label = 'Up Sweep')
curve_down = ax[1].plot(dff,20*np.log10(np.abs(trans_down)), color = 'k', linestyle = '--', alpha = .35, linewidth = 1,label = 'Down Sweep')
ax[1].set_title('Mag Transmission')
ax[1].set_xlabel(r'$\delta f_0 / f_0$', color='k')
ax[1].set_ylabel(r'$20 \cdot \log_{10}|S_{21}|$ [dB]', color='k')
ax[1].yaxis.labelpad = 0
ax[1].ticklabel_format(axis='x', style='sci',scilimits = (0,0), useOffset=True)
#ax[1].xaxis.set_ticks(np.hstack((np.arange(-numBW/2.0,0,f_0/Q),np.arange(0,numBW/2.0,f_0/Q))) )
parameter_dict = {'f_0':f_0, 'Qtl':Qtl, 'Qc':Qc, 'phi31':phi31, 'eta':eta, 'delta':delta, 'Zfl':Z1, 'Zres':Z3, 'phiV1':phiV1, 'V30V30':V30*V30}
note = '$P_{probe}$' + ' {:3.0f} dBm, '.format(Pprobe_dBm)+'\n' +(r'$f_0$ = {f_0:3.2e} Hz,' + '\n' + '$Q_{sub1}$ = {Qtl:3.2e},' +'\n' +' $Q_c$ = {Qc:3.2e},' +
'\n' + r'$\phi_{sub2}$ = {ang:3.2f}$^\circ$,'+ '\n' + '${l1}$ = {et:3.2e},' + '\n' +'${l2}$ = {de:3.2e}').format(
nl = '\n', et = parameter_dict['eta']/parameter_dict['V30V30'],
de = parameter_dict['delta']/parameter_dict['V30V30'],
l1 = r'{\eta}/{V_{3,0}^2}',
l2 = r'{\delta}/{V_{3,0}^2}',
ang = parameter_dict['phi31']*180/np.pi,
sub1 = '{i}', sub2 = '{31}',**parameter_dict)
ax[1].text(0.99, 0.01, note,
verticalalignment='bottom', horizontalalignment='right',
transform=ax[1].transAxes,
color='black', fontsize=4)
ax[2] = fig.add_subplot(2,2,2)
curve = ax[2].plot(dff,np.abs(V3),color = 'g', linestyle = '-',linewidth = 2)# <- V3 has complex values when it shouldn't !! should this be real part or abs?
upcurve = ax[2].plot(dff,np.abs(V3_up),color = 'k', linestyle = ':', alpha = .35,linewidth = 1, label = 'Up Sweep')
upcurve = ax[2].plot(dff,np.abs(V3_down),color = 'k', linestyle = '--', alpha = .35, linewidth = 1,label = 'Down Sweep')
#upcurve__ = ax[2].plot(dff,np.abs(V3__),linestyle = '--')
#curve__ = ax[2].plot(dff[~np.iscomplex(V3p__)].real,V3p__[~np.iscomplex(V3p__)].real,linestyle = '--', marker = '1')
#curve__ = ax[2].plot(dff[~np.iscomplex(V3m__)].real,V3m__[~np.iscomplex(V3m__)].real,linestyle = '--', marker = '2')
ax[2].set_title('Cavity Amplitude')
ax[2].set_xlabel(r'$\delta f_0 / f_0$', color='k')
ax[2].set_ylabel(r'Volts', color='k')
ax[2].ticklabel_format(axis='x', style='sci',scilimits = (0,0),useOffset=False)
ax[3] = fig.add_subplot(2,2,3,aspect='equal')
loop = ax[3].plot(trans.real, trans.imag,color = 'g', linestyle = '-',linewidth = 2)#, label = 'Full Solution')
loop[0].set_label('Full Solution')
loop_down = ax[3].plot(trans_down.real, trans_down.imag,color = 'k', linestyle = '--', alpha = .35, linewidth = 1,label = 'Down Sweep')
#loop = ax[3].plot(transp.real,transp.imag,linestyle = '--', marker = '1')
#oop = ax[3].plot(transm.real,transm.imag,linestyle = '--', marker = '2')
#firstpt = ax[3].plot(trans.real[0:10], trans.imag[0:10], 'ok')
loop_up = ax[3].plot(trans_up.real, trans_up.imag,color = 'k', linestyle = ':', alpha = .35,linewidth = 1, label = 'Up Sweep')
ax[3].set_title('Resonance Loop')
ax[3].set_xlabel(r'$\Re$[$S_{21}$]', color='k')
ax[3].set_ylabel(r'$\Im$[$S_{21}$]', color='k')
ax[3].yaxis.labelpad = 0
ax[3].ticklabel_format(axis='x', style='sci',scilimits = (0,0),useOffset=False)
ax[3].legend(loc = 'upper center', fontsize=7, bbox_to_anchor=(1.5, -.15), ncol=3,scatterpoints =1, numpoints = 1, labelspacing = .02)
ax[4] = fig.add_subplot(2,2,4)
trans_phase = np.ma.array(np.angle(trans),mask = trans.mask)
trans_up_phase = np.angle(trans_up)
trans_down_phase = np.angle(trans_down)
phase_ang_curve = ax[4].plot(dff,trans_phase,color = 'g', linestyle = '-',linewidth = 2)
phase_up_ang_curve = ax[4].plot(dff,trans_up_phase,color = 'k', linestyle = ':', alpha = .35,linewidth = 1, label = 'Up Sweep')
phase_down_ang_curve = ax[4].plot(dff,trans_down_phase,color = 'k', linestyle = '--', alpha = .35,linewidth = 1, label = 'Down Sweep')
ax[4].set_title('Transmitted Phase Angle')
ax[4].set_xlabel(r'$\delta f_0 / f_0$', color='k')
ax[4].set_ylabel(r'Ang[$S_{21}$]', color='k')
ax[4].yaxis.labelpad = 0
ax[4].ticklabel_format(axis='x', style='sci',scilimits = (0,0),useOffset=False)
for k in ax.keys():
ax[k].tick_params(axis='y', labelsize=5)
ax[k].tick_params(axis='x', labelsize=5)
plt.subplots_adjust(left=.1, bottom=.1, right=None ,wspace=.35, hspace=.3)
#plt.subplots_adjust(left=.1, bottom=.1, right=None, top=.95 ,wspace=.4, hspace=.4)
#plt.suptitle('Nonlinear Resonator Plots')
plt.show()
if Use_Run_45a:
Title = '45a_Nonlinear_Solition_Pprobe_-54dBm'
#swp._save_fig_dec(fig, Title.replace('\n','_').replace(' ','_'), Use_Date = Use_Date )
#fig.savefig('Nonlinear_Res',dpi=300, transparency = True)
| mit |
ThomasA/pywt | demo/dwt_signal_decomposition.py | 1 | 1808 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
import pywt
ecg = np.load(os.path.join('data', 'ecg.npy'))
data1 = np.concatenate((np.arange(1, 400),
np.arange(398, 600),
np.arange(601, 1024)))
x = np.linspace(0.082, 2.128, num=1024)[::-1]
data2 = np.sin(40 * np.log(x)) * np.sign((np.log(x)))
mode = pywt.MODES.sp1
def plot_signal_decomp(data, w, title):
"""Decompose and plot a signal S.
S = An + Dn + Dn-1 + ... + D1
"""
w = pywt.Wavelet(w)
a = data
ca = []
cd = []
for i in range(5):
(a, d) = pywt.dwt(a, w, mode)
ca.append(a)
cd.append(d)
rec_a = []
rec_d = []
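    # Reconstruct one sub-band at a time: waverec expects the coefficient list
    # [cA_n, cD_n, ..., cD_1], and the None placeholders stand in for the bands
    # left out of the reconstruction, so rec_a[i] keeps only the level-(i+1)
    # approximation and rec_d[i] only the level-(i+1) detail.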
for i, coeff in enumerate(ca):
coeff_list = [coeff, None] + [None] * i
rec_a.append(pywt.waverec(coeff_list, w))
for i, coeff in enumerate(cd):
coeff_list = [None, coeff] + [None] * i
rec_d.append(pywt.waverec(coeff_list, w))
fig = plt.figure()
ax_main = fig.add_subplot(len(rec_a) + 1, 1, 1)
ax_main.set_title(title)
ax_main.plot(data)
ax_main.set_xlim(0, len(data) - 1)
for i, y in enumerate(rec_a):
ax = fig.add_subplot(len(rec_a) + 1, 2, 3 + i * 2)
ax.plot(y, 'r')
ax.set_xlim(0, len(y) - 1)
ax.set_ylabel("A%d" % (i + 1))
for i, y in enumerate(rec_d):
ax = fig.add_subplot(len(rec_d) + 1, 2, 4 + i * 2)
ax.plot(y, 'g')
ax.set_xlim(0, len(y) - 1)
ax.set_ylabel("D%d" % (i + 1))
plot_signal_decomp(data1, 'coif5', "DWT: Signal irregularity")
plot_signal_decomp(data2, 'sym5',
"DWT: Frequency and phase change - Symmlets5")
plot_signal_decomp(ecg, 'sym5', "DWT: Ecg sample - Symmlets5")
plt.show()
| mit |
rmp91/jitd | java/benchmark.py | 1 | 14960 | #!/usr/bin/env python
import subprocess
import glob
import os
import re
import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import benchmark_configuration as config
def deleteFiles(pattern):
for fl in glob.glob(os.getcwd()+os.path.sep+pattern):
if os.path.isfile(fl):
os.remove(fl)
def deleteFile(filePath):
if os.path.isfile(filePath):
os.remove(filePath)
def deleteDataFiles():
deleteFiles("data*")
def deleteIndexFiles():
deleteFiles("index*")
def performCleanup():
deleteDataFiles()
deleteIndexFiles()
# In case you don't want to use timestamp as suffix set the flag
# 'appendTimeStampInBenchmarkFolder = False'
if config.appendTimeStampInBenchmarkFolder:
# Update the output parents with the folder name plus time stamps
timestamp = time.time()
stringTimeStamp = datetime.datetime.fromtimestamp(timestamp).strftime('%Y%m%d_%H%M%S')
config.xy_output_parent = config.xy_output_parent + '_' + stringTimeStamp
config.scatter_output_parent = config.scatter_output_parent + '_' + stringTimeStamp
xy_file_path = config.xy_parent_folder + os.path.sep + config.xy_sim_file + "." + config.xy_extension
xy_output_file_path = config.xy_output_parent + os.path.sep + config.xy_output_file + "." + config.xy_log_output_extension
time_output_file_path = config.xy_output_parent + os.path.sep + config.time_output_file + "." + config.xy_output_extension
data_written_output_file_path = config.xy_output_parent + os.path.sep + config.data_written_output_file + "." + config.xy_output_extension
data_read_output_file_path = config.xy_output_parent + os.path.sep + config.data_read_output_file + "." + config.xy_output_extension
scatter_file_path = config.scatter_parent_folder + os.path.sep + config.scatter_sim_file + "." + config.xy_extension
gc_file_path = config.xy_parent_folder + os.path.sep + config.xy_sim_file + "." + config.xy_extension
figCount = 1;
#Run a cleanup incase anything was already generated
performCleanup()
gc_benchmark_initialised = False
# Run the following set of instructions for all possible VM Arguments
if config.xy_plots:
total_gc_time_for_all_runs = 0.0
gc_benchmark_file = None
gc_benchmark_file_path = config.xy_output_parent + os.path.sep + config.gc_output_file + "_" + config.xy_sim_file + "." + config.xy_output_extension
for key in sorted(config.xy_vm_argument):
print "-----------------------------------------------------------\n"
print "Running with Heap Size : "+ str(key)+"MB" + "\n"
# vm_argument = "-Xmx" + str(key)+"M"
# -Xmx50M -Xloggc:benchmark/gc1.log -verbose:gc -XX:+PrintGCDetails
heap_size = "-Xmx" + str(key)+"M"
gc_output_log_file_path = config.xy_output_parent + os.path.sep + config.gc_log_file + "_" + config.xy_sim_file + "_" + str(key) + "m" + "." + config.gc_log_extension
gc_log = "-Xloggc:"+ gc_output_log_file_path
# Create the directory already because Java won't create it for Log files
directory = os.path.dirname(gc_output_log_file_path)
if not os.path.exists(directory):
os.makedirs(directory)
verbose_gc = "-verbose:gc"
print_gc = "-XX:+PrintGCDetails"
total_gc_time_for_all_runs = 0.0
# Perform Cleanup - Delete GC Log if it exists
deleteFile(gc_output_log_file_path)
for i in xrange(0,config.runs):
print "Run Count :" + str(i+1) + "\n"
# This will simulate the calling of "java -Xmx50M -cp build:lib/* jitd.benchmark.BenchmarkGenerator"
#p = subprocess.Popen(["java", vm_argument, "-cp", config.classpath,"jitd.benchmark.BenchmarkGenerator", xy_file_path, xy_output_file_path])
p = subprocess.Popen(["java", heap_size, gc_log, verbose_gc, print_gc, "-cp", config.classpath,"jitd.benchmark.BenchmarkGenerator",xy_file_path, xy_output_file_path])
# Wait for the above process to complete.
# Removing this statement might cause following instructions to run before the previous command completes executions
p.wait()
print "Running Cleanup operations for Run "+str(i+1)+"\n"
# Delete all the generated data files
performCleanup()
print "Cleanup operations for Run "+str(i+1)+"\n"
time.sleep(5)
# Analyzing the logs
print "Analyzing the GC Log for Heap Size : "+ str(key)+"MB" + "\n"
gc_time = 0
if not gc_benchmark_initialised:
gc_benchmark_file = open(gc_benchmark_file_path, "w")
gc_benchmark_file.write("Heap Size (in MB),Time spent in Garbage Collection(in seconds)\n")
gc_benchmark_initialised = True
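            # A -verbose:gc / PrintGCDetails log line looks roughly like
            #   0.235: [GC (Allocation Failure) [PSYoungGen: 512K->64K(1024K)] 512K->96K(2048K), 0.0012345 secs]
            # (the exact format varies by JVM and collector): the first decimal
            # is the JVM uptime and the second is the pause duration in
            # seconds, which is what the loop below accumulates.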
with open(gc_output_log_file_path) as f:
for line in f:
# If line starts with decimal
if re.match("^\d+\.\d+",line):
                    # Find all decimals on the line; the second one (decimals[1]) is the GC pause time in seconds
decimals = re.findall("\d+\.\d+", line)
if len(decimals) > 1:
# print decimals[1]
gc_time = gc_time + float(decimals[1])
print "Time taken in Garbage Collection Run "+str(i+1)+"\n"
total_gc_time_for_all_runs = total_gc_time_for_all_runs + gc_time
#print "\n"
average_gc_time = total_gc_time_for_all_runs / config.runs
print "Average Total Time spent in GC for Heap Size of " + str(key)+"MB :" + str(average_gc_time) + " seconds"
gc_benchmark_file.write(str(key)+","+str(average_gc_time)+"\n")
print "-----------------------------------------------------------\n"
# Close the file
gc_benchmark_file.close()
print "All the runs have completed successfully\n"
print "\n"
if config.gc_plots:
# Plot the graph
# GC Time vs Heap Size
figure = plt.figure(figCount)
data = np.genfromtxt(gc_benchmark_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
plt.plot(data['x'], data['y'],'o-')
plt.xlabel("Heap Size (in MB)")
plt.ylabel("Time spent in Garbage Collection (in seconds)")
plt.title("Time spent in Garbage Collection on different Heap Sizes")
plt.grid(True)
figure.savefig(config.xy_output_parent+os.path.sep+'gc-time-vs-heap-size-'+config.xy_sim_file+'.png')
figure.show()
figCount = figCount + 1
print "Fetching data from the logs to generate averaged data.\n"
print "\n"
# Call the program to Analyze the generated log and put it in a CSV
p = subprocess.Popen(["java", "-cp", config.classpath,"jitd.benchmark.BenchmarkLogAnalyzer",xy_output_file_path, time_output_file_path, data_written_output_file_path, data_read_output_file_path])
p.wait()
print "Data Calculation completed."
print "Generating graphs"
# Calculate the generated CSV File names based on the scatter
# Plot the graphs
if config.xy_heap_vs_time:
figure1 = plt.figure(figCount)
# Time vs Heap Size Graph
data = np.genfromtxt(time_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
plt.plot(data['x'], data['y'],'o-')
plt.xlabel("Heap Size (in MB)")
plt.ylabel("Time Taken (in seconds)")
plt.title("Time taken in cracker mode on different Heap Sizes")
plt.grid(True)
figure1.savefig(config.xy_output_parent+os.path.sep+'time-vs-heap-size-'+config.xy_sim_file+'.png')
figure1.show()
figCount = figCount + 1
if config.xy_heap_vs_data_written:
# Data Written vs Heap Size
figure2 = plt.figure(figCount)
data = np.genfromtxt(data_written_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
plt.plot(data['x'], data['y'],'o-')
plt.xlabel("Heap Size (in MB)")
plt.ylabel("Total data written to disk (in MB)")
plt.title("Total data written to disk on different Heap Sizes")
plt.grid(True)
figure2.savefig(config.xy_output_parent+os.path.sep+'bytes-written-vs-heap-size-'+config.xy_sim_file+'.png')
figure2.show()
figCount = figCount + 1
if config.xy_heap_vs_data_read:
# Data Read vs Heap Size
figure3 = plt.figure(figCount)
data = np.genfromtxt(data_read_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
plt.plot(data['x'], data['y'],'o-')
plt.xlabel("Heap Size (in MB)")
plt.ylabel("Total data read from the disk (in MB)")
plt.title("Total data read from disk on different Heap Sizes")
plt.grid(True)
figure3.savefig(config.xy_output_parent+os.path.sep+'bytes-read-vs-heap-size-'+config.xy_sim_file+'.png')
figure3.show()
figCount = figCount + 1
if config.total_time_vs_gc_time:
figure = plt.figure(figCount)
ax = figure.add_subplot(111)
# Time vs Heap Size Graph
data1 = np.genfromtxt(time_output_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
data2 = np.genfromtxt(gc_benchmark_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
index = np.arange(len(data1))
width = 0.25
rects1 = ax.bar(index, data1['y'], width, color = 'b')
rects2 = ax.bar(index + width, data2['y'], width, color = 'r')
ax.set_xlabel("Heap Size (in MB)")
ax.set_xticks(index + width)
ax.set_xticklabels(data1['x'])
ax.set_ylabel("Time Taken (in seconds)")
ax.set_title("Time taken in cracker mode on different Heap Sizes")
ax.legend((rects1[0], rects2[0]), ('Total Runtime', 'Garbage Collection'))
ax.grid(True)
figure.savefig(config.xy_output_parent+os.path.sep+'gc-time-total-runtime-vs-heap-size-'+config.xy_sim_file+'.png')
figure.show()
figCount = figCount + 1
# Make sure all data files are deleted before exiting
# Delete all the generated data files
performCleanup()
# Generate the scatter plots
if config.scatter_plots:
gc_benchmark_file_path = config.scatter_output_parent + os.path.sep + config.gc_output_file + "_" + config.scatter_sim_file + "." + config.scatter_output_extension
gc_benchmark_initialised = False
total_runtime_list = []
idx = 0
for key in sorted(config.scatter_vm_argument):
# vm_argument = "-Xmx" + str(key)+"M"
# vm_argument = "-Xmx" + str(key)+"M"
# -Xmx50M -Xloggc:benchmark/gc1.log -verbose:gc -XX:+PrintGCDetails
heap_size = "-Xmx" + str(key)+"M"
gc_output_log_file_path = config.scatter_output_parent + os.path.sep + config.gc_log_file + "_" + config.scatter_sim_file + "_" + str(key) + "m" + "." + config.gc_log_extension
gc_log = "-Xloggc:"+ gc_output_log_file_path
# Create the directory already because Java won't create it for Log files
directory = os.path.dirname(gc_output_log_file_path)
if not os.path.exists(directory):
os.makedirs(directory)
verbose_gc = "-verbose:gc"
print_gc = "-XX:+PrintGCDetails"
# Perform Cleanup - Delete GC Log if it exists
deleteFile(gc_output_log_file_path)
scatter_output_file_path = config.scatter_output_parent + os.path.sep + config.scatter_output_file + "_" + config.scatter_sim_file + "_" + str(key) + "m" + "." + config.scatter_output_extension
print "-----------------------------------------------------------\n"
print "Running with Heap Size : "+ str(key)+"MB" + "\n"
# p = subprocess.Popen(["java", vm_argument, "-cp", config.classpath,"jitd.benchmark.ScriptDriverBenchmark",scatter_file_path, scatter_output_file_path])
p = subprocess.Popen(["java", heap_size, gc_log, verbose_gc, print_gc, "-cp", config.classpath,"jitd.benchmark.ScriptDriverBenchmark",scatter_file_path, scatter_output_file_path])
# Wait for the above process to complete.
# Removing this statement might cause the following instructions to run before the previous command completes execution
p.wait()
# Delete all the generated data files
performCleanup()
print "Cleanup operations finished\n"
time.sleep(5)
print "\n"
# Analyzing the logs
print "Analyzing the GC Log for Heap Size : "+ str(key)+"MB" + "\n"
gc_time = 0
if not gc_benchmark_initialised:
gc_benchmark_file = open(gc_benchmark_file_path, "w")
gc_benchmark_file.write("Heap Size (in MB),Time spent in Garbage Collection(in seconds)\n")
gc_benchmark_initialised = True
with open(gc_output_log_file_path) as f:
for line in f:
# If line starts with decimal
if re.match("^\d+\.\d+",line):
# Find all decimals, we will need 1st in all decimals
decimals = re.findall("\d+\.\d+", line)
if len(decimals) > 1:
# print decimals[1]
gc_time = gc_time + float(decimals[1])
print "Total Time spent in Garbage Collection for Heap Size of " + str(key)+"MB :" + str(gc_time) + " seconds \n"
gc_benchmark_file.write(str(key)+","+str(gc_time)+"\n")
# Scatter plot data for the current heap size
# invalid_raise = False ignores any row with missing values instead of raising an exception
# dtype = None lets numpy infer the data type of each column
data = np.genfromtxt(scatter_output_file_path, delimiter=',', invalid_raise = False, dtype = None, names=['x','y','z'])
# Calculate the total runtime and put it in the list
total_runtime = sum(data['y'])
total_runtime_list.insert(idx, total_runtime)
idx += 1
use_color = {"WRITE":"red","READ":"blue"}
color_map = []
s_map = []
i = 0
for x in data['z']:
color_map.insert(i,use_color[x])
if(x == "WRITE"):
s_map.insert(i,10)
else:
s_map.insert(i,1)
i = i + 1
figure = plt.figure(figCount)
# Specify color maps for data points using color = color_map
plt.scatter(data['x'],data['y'], s=s_map, color=color_map)
plt.xlabel("Number of Iterations")
plt.yscale('log')
plt.ylabel("Time (in seconds)")
plt.title("System Performance in cracker mode with heap size "+str(key)+"MB")
plt.grid(True)
plt.plot()
plt.ylim([0.0000001,1000])
# Legend
classes = ['Write','Read']
class_colours = ['r','b']
recs = []
# Generate the legend for the graph
for i in range(0,len(class_colours)):
recs.append(mpatches.Rectangle((0,0),1,1,fc=class_colours[i]))
plt.legend(recs,classes)
figure.savefig(config.xy_output_parent+os.path.sep+'performance_'+str(key)+"m"+'.png')
figure.show()
figCount = figCount + 1
print "\nTotal runtime for Heap Size of "+str(key) + "MB" + " :" + str(total_runtime)
print "-----------------------------------------------------------\n"
if config.total_time_vs_gc_time:
figure = plt.figure(figCount)
ax = figure.add_subplot(111)
# Close the file
gc_benchmark_file.close()
# Time vs Heap Size Graph
data1 = total_runtime_list
data2 = np.genfromtxt(gc_benchmark_file_path, delimiter=',', invalid_raise = False, skip_header=1, names=['x', 'y'])
index = np.arange(len(data1))
width = 0.25
rects1 = ax.bar(index, data1, width, color = 'b')
rects2 = ax.bar(index + width, data2['y'], width, color = 'r')
ax.set_xlabel("Heap Size (in MB)")
ax.set_xticks(index + width)
ax.set_xticklabels(data2['x'])
ax.set_ylabel("Time Taken (in seconds)")
ax.set_title("Time taken in cracker mode on different Heap Sizes for Scatter Plots")
ax.legend((rects1[0], rects2[0]), ('Total Runtime', 'Garbage Collection'))
ax.grid(True)
figure.savefig(config.scatter_output_parent+os.path.sep+'gc-time-total-runtime-vs-heap-size-'+config.scatter_sim_file+'.png')
figure.show()
figCount = figCount + 1
# Following line will keep the graphs alive
print "Press Enter or Ctrl-C to exit"
raw_input() | apache-2.0 |
fzalkow/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
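# Illustrative example (added, not part of the scikit-learn source): for a toy
# matrix whose columns have few unique values, the grid is the cartesian
# product of the per-column axes, e.g.
#   X = np.array([[0., 10.], [1., 20.]])
#   grid, axes = _grid_from_X(X)
#   # axes -> [array([0., 1.]), array([10., 20.])]
#   # grid -> array([[ 0., 10.], [ 0., 20.], [ 1., 10.], [ 1., 20.]])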
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
googleinterns/where-is-my-watch | GpsDataAnalyzer/calculator/deviation_calculator.py | 1 | 8378 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles Calculations on pairs of GPS data sets.
Usage:
gps_fileparser = FileParser()
phone_data_set = gps_fileparser.parse_file("<file_path>/2020-07-21T19:48:44.697Z.xml")
simulator_data_set = gps_fileparser.parse_file("<file_path>/GPSSIM-2020-07-21_19:49:31.csv")
downsampled_list = simulator_data_set[0].gps_data_list[::10]
simulator_data_set[0].gps_data_list = downsampled_list
calculator = DataSetDeviationCalculator(phone_data_set, simulator_data_set[0])
devation_dataframe = calculator.get_deviation_dataframe()
"""
from datetime import datetime, timedelta
from datetime import timezone
import time
import numpy as np
import pandas as pd
from GpsDataAnalyzer import utils
from GpsDataAnalyzer.fileparser.fileparser import FileParser
from GpsDataAnalyzer.calculator import alignment_algorithms
class DataSetDeviationCalculator:
"""An object for Calculating Deviations on two data sets.
Attributes:
data_set_1: GpsDataSet
data_set_2: GpsDataSet
starting_time_1: Datetime, offset included start time for 1st set
starting_time_2: Datetime, offset included start time for 2nd set
ending_time_1: Datetime, offset included end time for 1st set
ending_time_2: Datetime, offset included end time for 2nd set
offset_mapping_1: Dictionary, {DateTime: [GpsData, ], ...}
offset_mapping_2: Dictionary, {DateTime: [GpsData, ], ...}
deviations_dataframe: Pandas Dataframe that holds values after calculation
"""
def __init__(self, data_set_1, data_set_2):
self.data_set_1 = data_set_1
self.data_set_2 = data_set_2
self.starting_time_1 = None
self.starting_time_2 = None
self.ending_time_1 = None
self.ending_time_2 = None
self.offset_mapping_1= {}
self.offset_mapping_2 = {}
self.deviations_dataframe = None
self.availability = None
start = time.perf_counter()
print("Optimized lineup implementation:")
self.starting_time_1, self.starting_time_2 = alignment_algorithms.find_lineup(self.data_set_1,
self.data_set_2)
end = time.perf_counter()
print(f"Lined up data in {end - start:0.4f} seconds")
print("start time 1: " + str(self.starting_time_1))
print("start time 2: " + str(self.starting_time_2))
print("\n")
self.ending_time_1 = self.data_set_1.gps_meta_data.end_time
self.ending_time_2 = self.data_set_2.gps_meta_data.end_time
if not self.starting_time_1 and not self.starting_time_2:
self.offset_mapping_1 = alignment_algorithms.create_time_to_points_mapping(self.data_set_1, 0)
self.offset_mapping_2 = alignment_algorithms.create_time_to_points_mapping(self.data_set_2, 0)
elif self.data_set_1.gps_data_list[0].time > self.data_set_2.gps_data_list[0].time:
offset = (self.starting_time_1-self.starting_time_2).total_seconds()
self.offset_mapping_1 = alignment_algorithms.create_time_to_points_mapping(self.data_set_1, 0)
self.offset_mapping_2 = alignment_algorithms.create_time_to_points_mapping(self.data_set_2, offset)
self.ending_time_2 = self.ending_time_2 + timedelta(seconds=offset)
else:
offset = (self.starting_time_2-self.starting_time_1).total_seconds()
self.offset_mapping_1 = alignment_algorithms.create_time_to_points_mapping(self.data_set_1, offset)
self.offset_mapping_2 = alignment_algorithms.create_time_to_points_mapping(self.data_set_2, 0)
self.ending_time_1 = self.ending_time_1 + timedelta(seconds=offset)
def get_deviation_dataframe(self):
"""
Extracts and returns deviation for each valid timestamp & other information.
Returns:
A pandas dataframe including the shared timestamp with the offset included,
the deviations of lat/lon, the difference in speed, the difference in
altitude, and the original timestamps for each set
"""
if self.deviations_dataframe is not None:
return self.deviations_dataframe
time_list, distance_deviation_list, speed_deviation_list, altitude_deviation_list= [], [], [], []
set1_time_list, set2_time_list = [], []
set1_average_signal_list, set2_average_signal_list, signal_deviation_list = [], [], []
for timestamp in self.offset_mapping_1:
if timestamp in self.offset_mapping_2:
time_list.append(timestamp)
# Get the mapping pair of data points in each dataset
point1 = self.offset_mapping_1[timestamp][0]
point2 = self.offset_mapping_2[timestamp][0]
# Calculate the distance deviation
location1 = (point1.latitude, point1.longitude)
location2 = (point2.latitude, point2.longitude)
distance_deviation_list.append(utils.calculate_distance(location1, location2))
# Calculate the speed differentials
speed_deviation_list.append(point2.speed - point1.speed)
# Calculate the altitude differentials
if point1.altitude is None or point2.altitude is None:
altitude_deviation_list.append(None)
else:
altitude_deviation_list.append(point2.altitude - point1.altitude)
# Append the original timestamp in each dataset
set1_time_list.append(point1.time)
set2_time_list.append(point2.time)
# Append the average signal if have
set1_average_signal_list.append(point1.average_signal)
set2_average_signal_list.append(point2.average_signal)
signal_deviation_list.append(point2.average_signal - point1.average_signal)
self.deviations_dataframe = pd.DataFrame({"Common Timestamp": time_list,
"Distance Deviations": distance_deviation_list,
"Speed Deviations": speed_deviation_list,
"Altitude Deviations": altitude_deviation_list,
"Set 1 Timestamp": set1_time_list,
"Set 2 Timestamp": set2_time_list,
"Set 1 Average Signal": set1_average_signal_list,
"Set 2 Average Signal": set2_average_signal_list,
"Signal Deviations": signal_deviation_list})
return self.deviations_dataframe
def get_availability(self):
"""
Calculate the availability of the wear-captured GPS data
Returns:
Percentage of overlapping timestamps for which both GPS data sets have a fix
"""
if self.availability:
return self.availability
if not self.starting_time_1 and not self.starting_time_2:
return 0
total_timestamps = 0
available_timestamps = 0
start_time = utils.round_time(max(self.starting_time_1, self.starting_time_2))
end_time = utils.round_time(min(self.ending_time_1, self.ending_time_2))
total_seconds = int((end_time-start_time).total_seconds())
for timestamp in [start_time + timedelta(seconds=x) for x in range(total_seconds)]:
if timestamp in self.offset_mapping_1 and timestamp in self.offset_mapping_2:
available_timestamps += 1
return round(available_timestamps / total_seconds, 4)*100
| apache-2.0 |
chandlercr/aima-python | submissions/Porter/myNN.py | 16 | 6217 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The neural-network model used here is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
def trumpTarget(percentage):
if percentage > 45:
return 1
return 0
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (100, 50, ),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
trumpScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
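# Quick illustration (added, not in the original submission): setupScales
# records the per-column min/max, and scaleGrid then maps each cell to [0, 1]:
#   setupScales([[0, 10], [5, 20], [10, 30]])
#   scaleGrid([[0, 10], [5, 20], [10, 30]])
#   # -> [[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]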
setupScales(trumpECHP.data)
trumpScaled.data = scaleGrid(trumpECHP.data)
trumpScaled.feature_names = trumpECHP.feature_names
trumpScaled.target = trumpECHP.target
trumpScaled.target_names = trumpECHP.target_names
'''
Teach a Neural net to count to 2
'''
count22 = DataFrame()
count22.data = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]
count22.feature_names = ['a', 'b', 'c']
count22.target = [0, 0, 0, 1,
0, 1, 1, 0]
count22.target_names = ['Two']
countMLPC = MLPClassifier(
hidden_layer_sizes = (3,), # (100,),
# activation = 'relu',
solver='sgd', # 'adam',
# alpha = 0.0001,
# batch_size='auto',
# learning_rate = 'constant',
# power_t = 0.5,
max_iter = 10, # 200,
# shuffle = True,
# random_state = None,
# tol = 1e-4,
verbose = True # False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
Examples = {
# 'TrumpDefault': {
# 'frame': trumpECHP,
# },
# 'TrumpSGD': {
# 'frame': trumpECHP,
# 'mlpc': mlpc
# },
# 'TrumpScaled': {
# 'frame': trumpScaled,
# },
'Count to 2': {
'frame': count22,
'mlpc': countMLPC
}
} | mit |
RobertABT/heightmap | build/matplotlib/examples/axes_grid/scatter_hist.py | 8 | 1562 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
# the random data
x = np.random.randn(1000)
y = np.random.randn(1000)
fig, axScatter = plt.subplots(figsize=(5.5,5.5))
# the scatter plot:
axScatter.scatter(x, y)
axScatter.set_aspect(1.)
# create new axes on the right and on the top of the current axes
# The second argument of append_axes is the size (height for "top",
# width for "right") in inches of the axes to be created.
divider = make_axes_locatable(axScatter)
axHistx = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
axHisty = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
# make some labels invisible
plt.setp(axHistx.get_xticklabels() + axHisty.get_yticklabels(),
visible=False)
# now determine nice limits by hand:
binwidth = 0.25
xymax = np.max( [np.max(np.fabs(x)), np.max(np.fabs(y))] )
lim = ( int(xymax/binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
# the xaxis of axHistx and yaxis of axHisty are shared with axScatter,
# thus there is no need to manually adjust the xlim and ylim of these
# axis.
#axHistx.axis["bottom"].major_ticklabels.set_visible(False)
for tl in axHistx.get_xticklabels():
tl.set_visible(False)
axHistx.set_yticks([0, 50, 100])
#axHisty.axis["left"].major_ticklabels.set_visible(False)
for tl in axHisty.get_yticklabels():
tl.set_visible(False)
axHisty.set_xticks([0, 50, 100])
plt.draw()
plt.show()
| mit |
adamgonzalez/analysis | COV_CI.py | 1 | 11142 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 02 11:25:09 2017
@author: Adam
"""
import os
import numpy as np
import math
import cmath
import matplotlib
import matplotlib.pyplot as plt
from astropy.io import fits
matplotlib.rcParams.update({'font.size': 18})
matplotlib.rcParams['axes.linewidth'] = 1
# Function to remove NaN bins from a light curve (or background) file and compute its average count rate
def data_cleaner(d_t_raw, d_r_raw, d_e_raw):
n = 0
for i in range (0,len(d_t_raw)):
if (math.isnan(d_r_raw[i]) == False):
n += 1
d_t = np.zeros(n)
d_r = np.zeros(n)
d_e = np.zeros(n)
n = 0
for i in range (0,len(d_t_raw)):
if (math.isnan(d_r_raw[i]) == False):
d_t[n] = d_t_raw[i]
d_r[n] = d_r_raw[i]
d_e[n] = d_e_raw[i]
n += 1
d_t = d_t - d_t[0]
a_r = np.average(d_r)
return d_t, d_r, d_e, n, a_r
os.chdir("/Users/agonzalez/Documents/Research/Data/IZw1")
#pic = '2769_COVvE_603.png'
# ORBIT 2768
with open('2768/lc_covCI_2768_600.txt','r') as lcfile:
#with open('2768/covariance_lists/lc_covCI_2768_601.txt','r') as lcfile:
#with open('2768/covariance_lists/lc_covCI_2768_602.txt','r') as lcfile:
#with open('2768/covariance_lists/lc_covCI_2768_603.txt','r') as lcfile:
#with open('2768/covariance_lists/lc_covCI_2768_604.txt','r') as lcfile:
# ORBIT 2769
#with open('2769/covariance_lists/lc_covCI_2769_600.txt','r') as lcfile:
#with open('2769/covariance_lists/lc_covCI_2769_601.txt','r') as lcfile:
#with open('2769/covariance_lists/lc_covCI_2769_602.txt','r') as lcfile:
#with open('2769/covariance_lists/lc_covCI_2769_603.txt','r') as lcfile:
#with open('2769/covariance_lists/lc_covCI_2769_604.txt','r') as lcfile:
lc_fnames = [line.rstrip('\n') for line in lcfile]
lcfile.close()
n_lc = len(lc_fnames)
# ORBIT 2768
with open('2768/lcref_covCI_2768_600.txt','r') as reffile:
#with open('2768/covariance_lists/lcref_covCI_2768_601.txt','r') as reffile:
#with open('2768/covariance_lists/lcref_covCI_2768_602.txt','r') as reffile:
#with open('2768/covariance_lists/lcref_covCI_2768_603.txt','r') as reffile:
#with open('2768/covariance_lists/lcref_covCI_2768_604.txt','r') as reffile:
# ORBIT 2769
#with open('2769/covariance_lists/lcref_covCI_2769_600.txt','r') as reffile:
#with open('2769/covariance_lists/lcref_covCI_2769_601.txt','r') as reffile:
#with open('2769/covariance_lists/lcref_covCI_2769_602.txt','r') as reffile:
#with open('2769/covariance_lists/lcref_covCI_2769_603.txt','r') as reffile:
#with open('2769/covariance_lists/lcref_covCI_2769_604.txt','r') as reffile:
ref_fnames = [line.rstrip('\n') for line in reffile]
reffile.close()
n_ref = len(ref_fnames)
# ORBIT 2768
with open('2768/bg_covCI_2768_600.txt','r') as bgfile:
#with open('2768/covariance_lists/bg_covCI_2768_601.txt','r') as bgfile:
#with open('2768/covariance_lists/bg_covCI_2768_602.txt','r') as bgfile:
#with open('2768/covariance_lists/bg_covCI_2768_603.txt','r') as bgfile:
#with open('2768/covariance_lists/bg_covCI_2768_604.txt','r') as bgfile:
# ORBIT 2769
#with open('2769/covariance_lists/bg_covCI_2769_600.txt','r') as bgfile:
#with open('2769/covariance_lists/bg_covCI_2769_601.txt','r') as bgfile:
#with open('2769/covariance_lists/bg_covCI_2769_602.txt','r') as bgfile:
#with open('2769/covariance_lists/bg_covCI_2769_603.txt','r') as bgfile:
#with open('2769/covariance_lists/bg_covCI_2769_604.txt','r') as bgfile:
bg_fnames = [line.rstrip('\n') for line in bgfile]
bgfile.close()
n_bg = len(bg_fnames)
# ORBIT 2768
with open('2768/bgref_covCI_2768_600.txt','r') as refbgfile:
#with open('2768/covariance_lists/bgref_covCI_2768_601.txt','r') as refbgfile:
#with open('2768/covariance_lists/bgref_covCI_2768_602.txt','r') as refbgfile:
#with open('2768/covariance_lists/bgref_covCI_2768_603.txt','r') as refbgfile:
#with open('2768/covariance_lists/bgref_covCI_2768_604.txt','r') as refbgfile:
# ORBIT 2769
#with open('2769/covariance_lists/bgref_covCI_2769_600.txt','r') as refbgfile:
#with open('2769/covariance_lists/bgref_covCI_2769_601.txt','r') as refbgfile:
#with open('2769/covariance_lists/bgref_covCI_2769_602.txt','r') as refbgfile:
#with open('2769/covariance_lists/bgref_covCI_2769_603.txt','r') as refbgfile:
#with open('2769/covariance_lists/bgref_covCI_2769_604.txt','r') as refbgfile:
refbg_fnames = [line.rstrip('\n') for line in refbgfile]
refbgfile.close()
n_refbg = len(refbg_fnames)
#n_lc = 2
n_RUNS = n_lc
# set up all of the final output variables and the number of files to go thru
energy = [0.3, 0.45, 0.55, 0.7, 0.9, 1.25, 1.75, 3.0, 5.0, 7.0, 9.0]
energy = energy[:n_RUNS]
Df_LF = 4.0*pow(10,-4.0) - 1.0*pow(10,-4.0)
Df_MF = 1.5*pow(10,-3.0) - 0.4*pow(10,-3.0)
Df_HF = 4.0*pow(10,-3.0) - 2.0*pow(10,-3.0)
plt.rc('font',family='serif')
# do the stuff
for RUN in range (0,n_RUNS):
print "RUN NUMBER: ", RUN+1
lcfits = fits.open(lc_fnames[RUN])
lcdata = lcfits[1].data
lcfits.close()
lc_t_raw = lcdata.field('TIME') ; lc_t_raw = lc_t_raw - lc_t_raw[0]
lc_r_raw = lcdata.field('RATE')
lc_e_raw = lcdata.field('ERROR')
bgfits = fits.open(bg_fnames[RUN])
bgdata = bgfits[1].data
bgfits.close()
bg_t_raw = bgdata.field('TIME') ; bg_t_raw = bg_t_raw - bg_t_raw[0]
bg_r_raw = bgdata.field('RATE')
bg_e_raw = bgdata.field('ERROR')
reffits = fits.open(ref_fnames[RUN])
refdata = reffits[1].data
reffits.close()
ref_t_raw = refdata.field('TIME') ; ref_t_raw = ref_t_raw - ref_t_raw[0]
ref_r_raw = refdata.field('RATE')
ref_e_raw = refdata.field('ERROR')
refbgfits = fits.open(refbg_fnames[RUN])
refbgdata = refbgfits[1].data
refbgfits.close()
refbg_t_raw = refbgdata.field('TIME') ; refbg_t_raw = refbg_t_raw - refbg_t_raw[0]
refbg_r_raw = refbgdata.field('RATE')
refbg_e_raw = refbgdata.field('ERROR')
#print "Mean Energy = ", energy[RUN]
lc_t, lc_r, lc_e, idx, avg_rate = data_cleaner(lc_t_raw, lc_r_raw, lc_e_raw) ; print "Average count rate = ", avg_rate
bg_t, bg_r, bg_e, bg_idx, avg_bg_rate = data_cleaner(bg_t_raw, bg_r_raw, bg_e_raw) ; print "Average background rate = ", avg_bg_rate
ref_t, ref_r, ref_e, ref_idx, avg_ref_rate = data_cleaner(ref_t_raw, ref_r_raw, ref_e_raw) ; print "Average ref count rate = ", avg_ref_rate
refbg_t, refbg_r, refbg_e, refbg_idx, avg_refbg_rate = data_cleaner(refbg_t_raw, refbg_r_raw, refbg_e_raw) ; print "Average ref background rate = ", avg_refbg_rate
# performing the DFT
n_bins = len(lc_t)
k = np.arange(n_bins-1)
frq = k/max(lc_t)
DFT = np.fft.fft(lc_r) #/n
DFT_ref = np.fft.fft(ref_r)
t_bins = lc_t[:-1]
dt = t_bins[1] - t_bins[0]
# grabbing only the relevant parts of frq and DFT
half_n_bins = int((n_bins-1.0)/2.0)
frq = frq[range(half_n_bins)]
DFT = DFT[range(half_n_bins)]
DFT_ref = DFT_ref[range(half_n_bins)]
df = frq[1] - frq[0]
# computing the PSD and background level
PSD = (2.0*dt*abs(DFT)**2.0)/(n_bins*avg_rate**2.0)
PN_lev = 2.0*(avg_rate + avg_bg_rate)/(avg_rate**2.0)
PSD_ref = (2.0*dt*abs(DFT_ref)**2.0)/(n_bins*avg_ref_rate**2.0)
PN_ref = 2.0*(avg_ref_rate + avg_refbg_rate)/(avg_ref_rate**2.0)
if (RUN == 0):
w, h = n_lc, half_n_bins
r = [[0 for x in range(w)] for y in range(h)]
phi = [[0 for x in range(w)] for y in range(h)]
r_ref = [[0 for x in range(w)] for y in range(h)]
phi_ref = [[0 for x in range(w)] for y in range(h)]
CS = [[0 for x in range(w)] for y in range(h)]
# working with the DFT values
for i in range (0,half_n_bins):
r[i][RUN], phi[i][RUN] = cmath.polar(DFT[i])
r_ref[i][RUN], phi_ref[i][RUN] = cmath.polar(DFT_ref[i])
# compute the cross spectrum
for row in range (0,half_n_bins):
CS[row][RUN] = (r[row][RUN]*r_ref[row][RUN]) * np.exp((-1.0*phi[row][RUN] + phi_ref[row][RUN])*1j)
# bin up the PSD and CS
C_LF = 0 ; C_MF = 0 ; C_HF = 0
PSD_LF_avg = 0 ; PSD_MF_avg = 0 ; PSD_HF_avg = 0
CS_LF_avg = 0 ; CS_MF_avg = 0 ; CS_HF_avg = 0
for i in range (0,len(frq)):
if (0.1e-3 <= frq[i] <= 0.4e-3):
C_LF += 1
PSD_LF_avg += PSD[i]
CS_LF_avg += CS[i][RUN]
if (0.4e-3 <= frq[i] <= 1.5e-3):
C_MF += 1
PSD_MF_avg += PSD[i]
CS_MF_avg += CS[i][RUN]
if (2e-3 <= frq[i] <= 4e-3):
C_HF += 1
PSD_HF_avg += PSD[i]
CS_HF_avg += CS[i][RUN]
PSD_LF_avg = PSD_LF_avg / C_LF
PSD_MF_avg = PSD_MF_avg / C_MF
PSD_HF_avg = PSD_HF_avg / C_HF
CS_LF_avg = CS_LF_avg / C_LF
CS_MF_avg = CS_MF_avg / C_MF
CS_HF_avg = CS_HF_avg / C_HF
C_ref_LF = 0 ; C_ref_MF = 0 ; C_ref_HF = 0
PSD_ref_LF_avg = 0 ; PSD_ref_MF_avg = 0 ; PSD_ref_HF_avg = 0
for i in range (0,len(frq)):
if (0.1e-3 <= frq[i] <= 0.4e-3):
C_ref_LF += 1
PSD_ref_LF_avg += PSD_ref[i]
if (0.4e-3 <= frq[i] <= 1.5e-3):
C_ref_MF += 1
PSD_ref_MF_avg += PSD_ref[i]
if (2e-3 <= frq[i] <= 4e-3):
C_ref_HF += 1
PSD_ref_HF_avg += PSD_ref[i]
PSD_ref_LF_avg = PSD_ref_LF_avg / C_ref_LF
PSD_ref_MF_avg = PSD_ref_MF_avg / C_ref_MF
PSD_ref_HF_avg = PSD_ref_HF_avg / C_ref_HF
if (RUN ==0):
COV_LF = np.zeros(n_lc)
COV_MF = np.zeros(n_lc)
COV_HF = np.zeros(n_lc)
nsq_LF = ((PSD_LF_avg - PN_lev)*PN_ref + (PSD_ref_LF_avg - PN_ref)*PN_lev + PN_lev*PN_ref)/C_LF
dfrq_LF = Df_LF
COV_LF[RUN] = avg_rate * np.sqrt( dfrq_LF*(abs(CS_LF_avg)**2.0 - nsq_LF) / (PSD_ref_LF_avg - PN_ref) )
nsq_MF = ((PSD_MF_avg - PN_lev)*PN_ref + (PSD_ref_MF_avg - PN_ref)*PN_lev + PN_lev*PN_ref)/C_MF
dfrq_MF = Df_MF
COV_MF[RUN] = avg_rate * np.sqrt( dfrq_MF*(abs(CS_MF_avg)**2.0 - nsq_MF) / (PSD_ref_MF_avg - PN_ref) )
nsq_HF = ((PSD_HF_avg - PN_lev)*PN_ref + (PSD_ref_HF_avg - PN_ref)*PN_lev + PN_lev*PN_ref)/C_HF
dfrq_HF = Df_HF
COV_HF[RUN] = avg_rate * np.sqrt( dfrq_HF*(abs(CS_HF_avg)**2.0 - nsq_HF) / (PSD_ref_HF_avg - PN_ref) )
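# Note (added for clarity): the three expressions above follow the standard
# band-averaged covariance estimator,
#   Cov = <rate> * sqrt( df * (|<CS>|^2 - n^2) / (<P_ref> - P_noise,ref) )
# where n^2 is the bias term built from the Poisson-noise levels of the
# subject and reference bands (cf. the covariance-spectrum method of
# Wilkinson & Uttley 2009); the variable names here mirror that formula.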
w, h = 4, len(energy)
M = [[0 for x in range(w)] for y in range(h)]
for i in range (0,len(energy)):
M[i][0], M[i][1], M[i][2], M[i][3] = energy[i], COV_LF[i], COV_MF[i], COV_HF[i]
##outfile = open('E_COV_LMH_2768.txt','a')
#outfile = open('E_COV_LMH_2769.txt','a')
#np.savetxt(outfile,M)
#outfile.close()
plt.figure(1)
plt.loglog(energy, COV_LF, '-or', label = "LF")
plt.loglog(energy, COV_MF, '-og', label = "MF")
plt.loglog(energy, COV_HF, '-ob', label = "HF")
plt.xlabel("Energy [keV]")
plt.ylabel('keV$^2$ (Photons cm$^{-2}$ s$^{-1}$ keV$^{-1}$)')
plt.xlim(0.20,10.0)
plt.legend(loc=3,labelspacing=0.1,fontsize=16)
plt.show()
#plt.savefig(pic,bbox_inches='tight')
| mit |
mcdeaton13/Tax-Calculator | taxcalc/utils.py | 2 | 19436 | import numpy as np
import pandas as pd
from pandas import DataFrame
from collections import defaultdict
STATS_COLUMNS = ['_expanded_income', 'c00100', '_standard', 'c04470', 'c04600',
'c04800', 'c05200', 'c62100', 'c09600', 'c05800', 'c09200',
'_refund', 'c07100', '_ospctax', 's006']
# each entry in this array corresponds to the same entry in the array
# TABLE_LABELS below. this allows us to use TABLE_LABELS to map a
# label to the correct column in our distribution table
TABLE_COLUMNS = ['s006', 'c00100', 'num_returns_StandardDed', '_standard',
'num_returns_ItemDed', 'c04470', 'c04600', 'c04800', 'c05200',
'c62100', 'num_returns_AMT', 'c09600', 'c05800', 'c07100',
'c09200', '_refund', '_ospctax']
TABLE_LABELS = ['Returns', 'AGI', 'Standard Deduction Filers',
'Standard Deduction', 'Itemizers',
'Itemized Deduction', 'Personal Exemption',
'Taxable Income', 'Regular Tax', 'AMTI', 'AMT Filers', 'AMT',
'Tax before Credits', 'Non-refundable Credits',
'Tax before Refundable Credits', 'Refundable Credits',
'Revenue']
# used in our difference table to label the columns
DIFF_TABLE_LABELS = ["Tax Units with Tax Cut", "Tax Units with Tax Increase",
"Count", "Average Tax Change", "Total Tax Difference",
"Percent with Tax Increase", "Percent with Tax Decrease",
"Share of Overall Change"]
LARGE_INCOME_BINS = [-1e14, 0, 9999, 19999, 29999, 39999, 49999, 74999, 99999,
200000, 1e14]
SMALL_INCOME_BINS = [-1e14, 0, 4999, 9999, 14999, 19999, 24999, 29999, 39999,
49999, 74999, 99999, 199999, 499999, 999999, 1499999,
1999999, 4999999, 9999999, 1e14]
WEBAPP_INCOME_BINS = [-1e14, 0, 9999, 19999, 29999, 39999, 49999, 74999, 99999,
199999, 499999, 1000000, 1e14]
def extract_array(f):
"""
A sanity check decorator. When combined with numba.vectorize
or guvectorize, it provides the same capability as dataframe_vectorize
or dataframe_guvectorize
"""
def wrapper(*args, **kwargs):
arrays = [arg.values for arg in args]
return f(*arrays)
return wrapper
def expand_1D(x, inflate, inflation_rates, num_years):
"""
Expand the given data to account for the given number of budget years.
If necessary, pad out additional years by increasing the last given
year at the provided inflation rate.
"""
if isinstance(x, np.ndarray):
if len(x) >= num_years:
return x
else:
ans = np.zeros(num_years, dtype='f8')
ans[:len(x)] = x
if inflate:
extra = []
cur = x[-1]
for i in range(0, num_years - len(x)):
inf_idx = i + len(x) - 1
cur *= (1. + inflation_rates[inf_idx])
extra.append(cur)
else:
extra = [float(x[-1]) for i in
range(1, num_years - len(x) + 1)]
ans[len(x):] = extra
return ans.astype(x.dtype, casting='unsafe')
return expand_1D(np.array([x]), inflate, inflation_rates, num_years)
def expand_2D(x, inflate, inflation_rates, num_years):
"""
Expand the given data to account for the given number of budget years.
For 2D arrays, we expand out the number of rows until we have num_years
number of rows. For each expanded row, we inflate by the given inflation
rate.
"""
if isinstance(x, np.ndarray):
# Look for -1s and create masks if present
last_good_row = -1
keep_user_data_mask = []
keep_calc_data_mask = []
has_nones = False
for row in x:
keep_user_data_mask.append([1 if i != -1 else 0 for i in row])
keep_calc_data_mask.append([0 if i != -1 else 1 for i in row])
if not np.any(row == -1):
last_good_row += 1
else:
has_nones = True
if x.shape[0] >= num_years and not has_nones:
return x
else:
if has_nones:
c = x[:last_good_row + 1]
keep_user_data_mask = np.array(keep_user_data_mask)
keep_calc_data_mask = np.array(keep_calc_data_mask)
else:
c = x
ans = np.zeros((num_years, c.shape[1]))
ans[:len(c), :] = c
if inflate:
extra = []
cur = c[-1]
for i in range(0, num_years - len(c)):
inf_idx = i + len(c) - 1
cur = np.array(cur * (1. + inflation_rates[inf_idx]))
extra.append(cur)
else:
extra = [c[-1, :] for i in
range(1, num_years - len(c) + 1)]
ans[len(c):, :] = extra
if has_nones:
# Use masks to "mask in" provided data and "mask out"
# data we don't need (produced in rows with a None value)
ans = ans * keep_calc_data_mask
user_vals = x * keep_user_data_mask
ans = ans + user_vals
return ans.astype(c.dtype, casting='unsafe')
return expand_2D(np.array(x), inflate, inflation_rates, num_years)
def strip_Nones(x):
"""
Takes a list of scalar values or a list of lists.
If it is a list of scalar values, when None is encountered, we
return everything encountered before. If a list of lists, we
replace None with -1 and return
Parameters
----------
x: list
Returns
-------
list
"""
accum = []
for val in x:
if val is None:
return accum
if not isinstance(val, list):
accum.append(val)
else:
for i, v in enumerate(val):
if v is None:
val[i] = -1
accum.append(val)
return accum
def expand_array(x, inflate, inflation_rates, num_years):
"""
Dispatch to either expand_1D or expand_2D depending on the dimension of x
Parameters
----------
x : value to expand
inflate: Boolean
As we expand, inflate values if this is True, otherwise, just copy
inflation_rates: list of float
Yearly inflation rates
num_years: int
Number of budget years to expand
Returns
-------
expanded numpy array
"""
x = np.array(strip_Nones(x))
try:
if len(x.shape) == 1:
return expand_1D(x, inflate, inflation_rates, num_years)
elif len(x.shape) == 2:
return expand_2D(x, inflate, inflation_rates, num_years)
else:
raise ValueError("Need a 1D or 2D array")
except AttributeError:
raise ValueError("Must pass a numpy array")
def count_gt_zero(agg):
return sum([1 for a in agg if a > 0])
def count_lt_zero(agg):
return sum([1 for a in agg if a < 0])
def weighted_count_lt_zero(agg, col_name, tolerance=-0.001):
return agg[agg[col_name] < tolerance]['s006'].sum()
def weighted_count_gt_zero(agg, col_name, tolerance=0.001):
return agg[agg[col_name] > tolerance]['s006'].sum()
def weighted_count(agg):
return agg['s006'].sum()
def weighted_mean(agg, col_name):
return (float((agg[col_name] * agg['s006']).sum()) /
float(agg['s006'].sum()))
def weighted_sum(agg, col_name):
return (agg[col_name] * agg['s006']).sum()
def weighted_perc_inc(agg, col_name):
return (float(weighted_count_gt_zero(agg, col_name)) /
float(weighted_count(agg)))
def weighted_perc_dec(agg, col_name):
return (float(weighted_count_lt_zero(agg, col_name)) /
float(weighted_count(agg)))
def weighted_share_of_total(agg, col_name, total):
return float(weighted_sum(agg, col_name)) / float(total)
def add_weighted_decile_bins(df, income_measure='_expanded_income'):
"""
Add a column of income bins based on each 10% of the income_measure,
weighted by s006.
The default income_measure is `expanded_income`, but `c00100` also works.
This function will serve as a "grouper" later on.
"""
# First, sort by income_measure
df.sort(income_measure, inplace=True)
# Next, do a cumulative sum by the weights
df['cumsum_weights'] = np.cumsum(df['s006'].values)
# Max value of cum sum of weights
max_ = df['cumsum_weights'].values[-1]
# Create 10 bins and labels based on this cumulative weight
bins = [0] + list(np.arange(1, 11) * (max_ / 10.0))
labels = list(range(1, 11))
# Groupby weighted deciles
df['bins'] = pd.cut(df['cumsum_weights'], bins, labels=labels)
return df
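# Worked toy example (added): with weights s006 = [4, 4, 2] after sorting by
# income, cumsum_weights = [4, 8, 10], max_ = 10 and the bin edges are
# 0, 1, ..., 10, so the three records land in deciles 4, 8 and 10 — each
# record is assigned the decile in which its cumulative weight falls, giving
# bins that each hold ~10% of the weighted population rather than 10% of rows.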
def add_income_bins(df, compare_with="soi", bins=None, right=True,
income_measure='_expanded_income'):
"""
Add a column of income bins of income_measure using pandas 'cut'.
This will serve as a "grouper" later on.
Parameters
----------
df: DataFrame object
the object to which we are adding bins
compare_with: String, optional
options for input: 'tpc', 'soi', 'webapp'
determines which types of bins will be added
default: 'soi'
bins: iterable of scalars, optional income breakpoints.
Follows pandas convention. The breakpoint is inclusive if
right=True. This argument overrides any choice of compare_with.
right : bool, optional
Indicates whether the bins include the rightmost edge or not.
If right == True (the default), then the bins [1,2,3,4]
indicate (1,2], (2,3], (3,4].
Returns
-------
df: DataFrame object
the original input that bins have been added to
"""
if not bins:
if compare_with == "tpc":
bins = LARGE_INCOME_BINS
elif compare_with == "soi":
bins = SMALL_INCOME_BINS
elif compare_with == "webapp":
bins = WEBAPP_INCOME_BINS
else:
msg = "Unknown compare_with arg {0}".format(compare_with)
raise ValueError(msg)
# Groupby income_measure bins
df['bins'] = pd.cut(df[income_measure], bins, right=right)
return df
def means_and_comparisons(df, col_name, gp, weighted_total):
"""
Using grouped values, perform aggregate operations
to populate
df: DataFrame for full results of calculation
col_name: the column name to calculate against
gp: grouped DataFrame
"""
# Who has a tax cut, and who has a tax increase
diffs = gp.apply(weighted_count_lt_zero, col_name)
diffs = DataFrame(data=diffs, columns=['tax_cut'])
diffs['tax_inc'] = gp.apply(weighted_count_gt_zero, col_name)
diffs['count'] = gp.apply(weighted_count)
diffs['mean'] = gp.apply(weighted_mean, col_name)
diffs['tot_change'] = gp.apply(weighted_sum, col_name)
diffs['perc_inc'] = gp.apply(weighted_perc_inc, col_name)
diffs['perc_cut'] = gp.apply(weighted_perc_dec, col_name)
diffs['share_of_change'] = gp.apply(weighted_share_of_total,
col_name, weighted_total)
return diffs
def weighted(df, X):
agg = df
for colname in X:
if not colname.startswith('s006'):
agg[colname] = df[colname] * df['s006']
return agg
def get_sums(df, na=False):
"""
Gets the unweighted sum of each column, saving the col name
and the corresponding sum
Returns
-------
pandas.Series
"""
sums = defaultdict(lambda: 0)
for col in df.columns.tolist():
if col != 'bins':
if na:
sums[col] = 'n/a'
else:
sums[col] = (df[col]).sum()
return pd.Series(sums, name='sums')
def results(c):
"""
Gets the results from the tax calculator and organizes them into a table
Parameters
----------
c : Calculator object
Returns
-------
DataFrame object
"""
outputs = []
for col in STATS_COLUMNS:
if hasattr(c, 'records') and hasattr(c, 'params'):
if hasattr(c.params, col):
outputs.append(getattr(c.params, col))
else:
outputs.append(getattr(c.records, col))
else:
outputs.append(getattr(c, col))
return DataFrame(data=np.column_stack(outputs), columns=STATS_COLUMNS)
def weighted_avg_allcols(df, cols, income_measure='_expanded_income'):
diff = DataFrame(df.groupby('bins', as_index=False).apply(weighted_mean,
income_measure),
columns=[income_measure])
for col in cols:
if (col == "s006" or col == 'num_returns_StandardDed' or
col == 'num_returns_ItemDed' or col == 'num_returns_AMT'):
diff[col] = df.groupby('bins', as_index=False)[col].sum()[col]
elif col != income_measure:
diff[col] = df.groupby('bins', as_index=False).apply(weighted_mean,
col)
return diff
def create_distribution_table(calc, groupby, result_type,
income_measure='_expanded_income'):
"""
Gets results given by the tax calculator, sorts them based on groupby, and
manipulates them based on result_type. Returns these as a table
Parameters
----------
calc : the Calculator object
groupby : String object
options for input: 'weighted_deciles', 'small_income_bins',
'large_income_bins', 'webapp_income_bins';
determines how the columns in the resulting DataFrame are sorted
result_type : String object
options for input: 'weighted_sum' or 'weighted_avg';
determines how the data should be manipulated
Notes
-----
Taxpayer Characteristics:
c04470 : Total itemized deduction
c00100 : AGI (Defecit)
c09600 : Alternative minimum tax
s006 : used to weight population
Returns
-------
DataFrame object
"""
res = results(calc)
# weight of returns with positive AGI and
# itemized deduction greater than standard deduction
res['c04470'] = res['c04470'].where(((res['c00100'] > 0) &
(res['c04470'] > res['_standard'])), 0)
# weight of returns with positive AGI and itemized deduction
res['num_returns_ItemDed'] = res['s006'].where(((res['c00100'] > 0) &
(res['c04470'] > 0)), 0)
# weight of returns with positive AGI and standard deduction
res['num_returns_StandardDed'] = res['s006'].where(((res['c00100'] > 0) &
(res['_standard'] > 0)),
0)
# weight of returns with positive Alternative Minimum Tax (AMT)
res['num_returns_AMT'] = res['s006'].where(res['c09600'] > 0, 0)
# sorts the data
if groupby == "weighted_deciles":
df = add_weighted_decile_bins(res, income_measure=income_measure)
elif groupby == "small_income_bins":
df = add_income_bins(res, compare_with="soi",
income_measure=income_measure)
elif groupby == "large_income_bins":
df = add_income_bins(res, compare_with="tpc",
income_measure=income_measure)
elif groupby == "webapp_income_bins":
df = add_income_bins(res, compare_with="webapp",
income_measure=income_measure)
else:
err = ("groupby must be either 'weighted_deciles' or"
"'small_income_bins' or 'large_income_bins' or"
"'webapp_income_bins'")
raise ValueError(err)
# manipulates the data
pd.options.display.float_format = '{:8,.0f}'.format
if result_type == "weighted_sum":
df = weighted(df, STATS_COLUMNS)
gp_mean = df.groupby('bins', as_index=False)[TABLE_COLUMNS].sum()
gp_mean.drop('bins', axis=1, inplace=True)
sum_row = get_sums(df)[TABLE_COLUMNS]
elif result_type == "weighted_avg":
gp_mean = weighted_avg_allcols(df, TABLE_COLUMNS,
income_measure=income_measure)
sum_row = get_sums(df, na=True)[TABLE_COLUMNS]
else:
err = ("result_type must be either 'weighted_sum' or 'weighted_avg")
raise ValueError(err)
return gp_mean.append(sum_row)
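# Typical call (illustrative; assumes a fitted Calculator instance `calc`):
#   dist = create_distribution_table(calc, groupby='weighted_deciles',
#                                    result_type='weighted_sum')
# which returns one row per income bin plus a 'sums' row over TABLE_COLUMNS.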
def create_difference_table(calc1, calc2, groupby,
income_measure='_expanded_income'):
"""
Gets results given by the two different tax calculators and outputs
a table that compares the differing results.
The table is sorted according the the groupby input.
Parameters
----------
calc1, the first Calculator object
calc2, the other Calculator object
groupby, String object
options for input: 'weighted_deciles', 'small_income_bins',
'large_income_bins', 'webapp_income_bins'
determines how the columns in the resulting DataFrame are sorted
Returns
-------
DataFrame object
"""
res1 = results(calc1)
res2 = results(calc2)
if groupby == "weighted_deciles":
df = add_weighted_decile_bins(res2, income_measure=income_measure)
elif groupby == "small_income_bins":
df = add_income_bins(res2, compare_with="soi",
income_measure=income_measure)
elif groupby == "large_income_bins":
df = add_income_bins(res2, compare_with="tpc",
income_measure=income_measure)
elif groupby == "webapp_income_bins":
df = add_income_bins(res2, compare_with="webapp",
income_measure=income_measure)
else:
err = ("groupby must be either"
"'weighted_deciles' or 'small_income_bins'"
"or 'large_income_bins' or 'webapp_income_bins'")
raise ValueError(err)
# Difference in plans
# Positive values are the magnitude of the tax increase
# Negative values are the magnitude of the tax decrease
res2['tax_diff'] = res2['_ospctax'] - res1['_ospctax']
diffs = means_and_comparisons(res2, 'tax_diff',
df.groupby('bins', as_index=False),
(res2['tax_diff'] * res2['s006']).sum())
sum_row = get_sums(diffs)[diffs.columns.tolist()]
diffs = diffs.append(sum_row)
pd.options.display.float_format = '{:8,.0f}'.format
srs_inc = ["{0:.2f}%".format(val * 100) for val in diffs['perc_inc']]
diffs['perc_inc'] = pd.Series(srs_inc, index=diffs.index)
srs_cut = ["{0:.2f}%".format(val * 100) for val in diffs['perc_cut']]
diffs['perc_cut'] = pd.Series(srs_cut, index=diffs.index)
srs_change = ["{0:.2f}%".format(val * 100)
for val in diffs['share_of_change']]
diffs['share_of_change'] = pd.Series(srs_change, index=diffs.index)
# columns containing weighted values relative to the binning mechanism
non_sum_cols = [x for x in diffs.columns.tolist()
if 'mean' in x or 'perc' in x]
for col in non_sum_cols:
diffs.loc['sums', col] = 'n/a'
return diffs
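# Illustrative usage sketch (added; not part of the original module). Building
# the Calculator objects happens outside this file, so `calc`, `calc1` and
# `calc2` below are assumed to be taxcalc Calculator instances created
# elsewhere.
def _table_usage_example(calc, calc1, calc2):
    """Return a weighted-average distribution table and a difference table."""
    dist = create_distribution_table(calc, groupby='weighted_deciles',
                                     result_type='weighted_avg')
    diff = create_difference_table(calc1, calc2, groupby='weighted_deciles')
    return dist, diff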
| mit |
pybel/pybel-tools | src/pybel_tools/document_utils.py | 1 | 6764 | # -*- coding: utf-8 -*-
"""Utilities to merge multiple BEL documents on the same topic."""
import logging
from typing import Iterable, Mapping, Optional, Set, TextIO, Union
from xml.etree import ElementTree
import pandas as pd
import requests
from bel_resources import make_knowledge_header
from pybel.utils import ensure_quotes
__all__ = [
'write_boilerplate',
]
logger = logging.getLogger(__name__)
abstract_url_fmt = "http://togows.dbcls.jp/entry/ncbi-pubmed/{}/abstract"
title_url_fmt = "http://togows.dbcls.jp/entry/ncbi-pubmed/{}/title"
#: SO gives short citation information
so_url_fmt = "http://togows.dbcls.jp/entry/ncbi-pubmed/{}/so"
def make_pubmed_abstract_group(pmids: Iterable[Union[str, int]]) -> Iterable[str]:
"""Build a skeleton for the citations' statements.
:param pmids: A list of PubMed identifiers
:return: An iterator over the lines of the citation section
"""
for pmid in set(pmids):
yield ''
res = requests.get(title_url_fmt.format(pmid))
title = res.content.decode('utf-8').strip()
yield f'SET Citation = {{"{title}", "{pmid}"}}'
res = requests.get(abstract_url_fmt.format(pmid))
abstract = res.content.decode('utf-8').strip()
yield f'SET Evidence = "{abstract}"'
yield '\nUNSET Evidence\nUNSET Citation'
def _sanitize(s):
if s is not None:
return s.strip().replace('\n', '')
#: Allows for querying the Entrez Gene Summary utility by formatting with an entrez id or list of comma-separated ids
PUBMED_GENE_QUERY_URL = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=gene&id={}'
def get_entrez_gene_data(entrez_ids: Iterable[Union[str, int]]):
"""Get gene info from Entrez."""
url = PUBMED_GENE_QUERY_URL.format(','.join(str(x).strip() for x in entrez_ids))
response = requests.get(url)
tree = ElementTree.fromstring(response.content)
return {
element.attrib['uid']: {
'summary': _sanitize(element.find('Summary').text),
'description': element.find('Description').text
}
for element in tree.findall('./DocumentSummarySet/DocumentSummary')
}
def make_pubmed_gene_group(entrez_ids: Iterable[Union[str, int]]) -> Iterable[str]:
"""Build a skeleton for gene summaries.
:param entrez_ids: A list of Entrez Gene identifiers to query the PubMed service
:return: An iterator over statement lines for NCBI Entrez Gene summaries
"""
url = PUBMED_GENE_QUERY_URL.format(','.join(str(x).strip() for x in entrez_ids))
response = requests.get(url)
tree = ElementTree.fromstring(response.content)
for x in tree.findall('./DocumentSummarySet/DocumentSummary'):
yield '\n# {}'.format(x.find('Description').text)
yield 'SET Citation = {{"Other", "PubMed Gene", "{}"}}'.format(x.attrib['uid'])
yield 'SET Evidence = "{}"'.format(x.find('Summary').text.strip().replace('\n', ''))
yield '\nUNSET Evidence\nUNSET Citation'
def write_boilerplate(
name: str,
version: Optional[str] = None,
description: Optional[str] = None,
authors: Optional[str] = None,
contact: Optional[str] = None,
copyright: Optional[str] = None,
licenses: Optional[str] = None,
disclaimer: Optional[str] = None,
namespace_url: Optional[Mapping[str, str]] = None,
namespace_patterns: Optional[Mapping[str, str]] = None,
annotation_url: Optional[Mapping[str, str]] = None,
annotation_patterns: Optional[Mapping[str, str]] = None,
annotation_list: Optional[Mapping[str, Set[str]]] = None,
pmids: Optional[Iterable[Union[str, int]]] = None,
entrez_ids: Optional[Iterable[Union[str, int]]] = None,
file: Optional[TextIO] = None,
) -> None:
"""Write a boilerplate BEL document, with standard document metadata, definitions.
:param name: The unique name for this BEL document
:param contact: The email address of the maintainer
:param description: A description of the contents of this document
:param authors: The authors of this document
:param version: The version. Defaults to current date in format ``YYYYMMDD``.
:param copyright: Copyright information about this document
:param licenses: The license applied to this document
:param disclaimer: The disclaimer for this document
:param namespace_url: an optional dictionary of {str name: str URL} of namespaces
:param namespace_patterns: An optional dictionary of {str name: str regex} namespaces
:param annotation_url: An optional dictionary of {str name: str URL} of annotations
:param annotation_patterns: An optional dictionary of {str name: str regex} of regex annotations
:param annotation_list: An optional dictionary of {str name: set of names} of list annotations
:param pmids: A list of PubMed identifiers to auto-populate with citation and abstract
:param entrez_ids: A list of Entrez identifiers to autopopulate the gene summary as evidence
:param file: A writable file or file-like. If None, defaults to :data:`sys.stdout`
"""
lines = make_knowledge_header(
name=name,
version=version or '1.0.0',
description=description,
authors=authors,
contact=contact,
copyright=copyright,
licenses=licenses,
disclaimer=disclaimer,
namespace_url=namespace_url,
namespace_patterns=namespace_patterns,
annotation_url=annotation_url,
annotation_patterns=annotation_patterns,
annotation_list=annotation_list,
)
for line in lines:
print(line, file=file)
if pmids is not None:
for line in make_pubmed_abstract_group(pmids):
print(line, file=file)
if entrez_ids is not None:
for line in make_pubmed_gene_group(entrez_ids):
print(line, file=file)
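# Minimal usage sketch (added for illustration; not part of the original
# module). The document name, author details, and namespace URL are
# placeholder assumptions.
def _write_boilerplate_example() -> None:
    """Write a small skeleton BEL document to standard output."""
    import sys
    write_boilerplate(
        name='Example BEL Document',
        version='1.0.0',
        description='A skeleton document produced by write_boilerplate.',
        authors='Example Author',
        contact='author@example.com',
        namespace_url={'HGNC': 'https://example.org/hgnc.belns'},  # assumed URL
        file=sys.stdout,
    )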
def replace_selventa_namespaces(path: str) -> None:
"""Update SFAM/SCOM namespaces to FamPlex."""
df = pd.read_csv(
'https://raw.githubusercontent.com/johnbachman/famplex/master/equivalences.csv',
names=['namespace', 'label', 'famplex']
)
# Filter to namespace BEL
df = df[df.namespace == 'BEL']
mapping_dict = {}
for _, label, famplex in df.values:
for p in 'SCOMP', 'SFAM':
mapping_dict[f'{p}:{ensure_quotes(label)}'] = f'FPLX:{ensure_quotes(famplex)}'
lines = []
with open(path) as file:
for line in file:
for k, v in mapping_dict.items():
if k in line:
                    print(f'Upgrading line {k} to {v}')
line = line.replace(k, v)
lines.append(line.strip('\n'))
with open(path, 'w') as file:
for line in lines:
print(line, file=file)
| mit |
ayakubovich/bat-country | setup.py | 6 | 1106 | from distutils.core import setup
setup(
name='bat-country',
packages=['batcountry'],
version='0.2',
description='A lightweight, extendible, easy to use Python package for deep dreaming and image generation with Caffe and CNNs',
author='Adrian Rosebrock',
author_email='[email protected]',
url='https://github.com/jrosebr1/bat-country',
download_url='https://github.com/jrosebr1/bat-country/tarball/0.1',
license='MIT',
install_requires=[
'Pillow==2.9.0',
'argparse==1.2.1',
'decorator==3.4.2',
'imutils==0.2.2',
'matplotlib==1.4.3',
'mock==1.0.1',
'networkx==1.9.1',
'nose==1.3.7',
'numpy==1.9.2',
'protobuf==2.6.1',
'pyparsing==2.0.3',
'python-dateutil==2.4.2',
'pytz==2015.4',
'scikit-image==0.11.3',
'scipy==0.15.1',
'six==1.9.0',
'wsgiref==0.1.2',
],
keywords=['computer vision', 'machine learning', 'deep learning',
'convolutional neural network', 'deep dream', 'inceptionism'],
classifiers=[],
)
| mit |
silgon/rlpy | rlpy/Domains/RCCar.py | 3 | 5073 | """RC-Car domain"""
from rlpy.Tools import plt, bound, wrap, mpatches, id2vec
import matplotlib as mpl
from .Domain import Domain
import numpy as np
__author__ = "Alborz Geramifard"
class RCCar(Domain):
"""
This is a simple simulation of Remote Controlled Car in a room with no obstacle.
**STATE:** 4 continuous dimensions:
* x, y: (center point on the line connecting the back wheels),
* speed (S on the webpage)
* heading (theta on the webpage) w.r.t. body frame.
positive values => turning right, negative values => turning left
**ACTIONS:** Two action dimensions:
* accel [forward, coast, backward]
* phi [turn left, straight, turn Right]
This leads to 3 x 3 = 9 possible actions.
**REWARD:** -1 per step, 100 at goal.
**REFERENCE:**
.. seealso::
http://planning.cs.uiuc.edu/node658.html
"""
actions_num = 9
state_space_dims = 4
continuous_dims = np.arange(state_space_dims)
ROOM_WIDTH = 3 # in meters
ROOM_HEIGHT = 2 # in meters
XMIN = -ROOM_WIDTH / 2.0
XMAX = ROOM_WIDTH / 2.0
YMIN = -ROOM_HEIGHT / 2.0
YMAX = ROOM_HEIGHT / 2.0
ACCELERATION = .1
TURN_ANGLE = np.pi / 6
SPEEDMIN = -.3
SPEEDMAX = .3
HEADINGMIN = -np.pi
HEADINGMAX = np.pi
INIT_STATE = np.array([0.0, 0.0, 0.0, 0.0])
STEP_REWARD = -1
GOAL_REWARD = 0
GOAL = [.5, .5]
GOAL_RADIUS = .1
actions = np.outer([-1, 0, 1], [-1, 0, 1])
discount_factor = .9
episodeCap = 10000
delta_t = .1 # time between steps
CAR_LENGTH = .3 # L on the webpage
CAR_WIDTH = .15
# The location of rear wheels if the car facing right with heading 0
REAR_WHEEL_RELATIVE_LOC = .05
# Used for visual stuff:
domain_fig = None
X_discretization = 20
Y_discretization = 20
SPEED_discretization = 5
HEADING_discretization = 3
ARROW_LENGTH = .2
car_fig = None
def __init__(self, noise=0):
self.statespace_limits = np.array(
[[self.XMIN,
self.XMAX],
[self.YMIN,
self.YMAX],
[self.SPEEDMIN,
self.SPEEDMAX],
[self.HEADINGMIN,
self.HEADINGMAX]])
self.Noise = noise
super(RCCar, self).__init__()
def step(self, a):
x, y, speed, heading = self.state
# Map a number between [0,8] to a pair. The first element is
# acceleration direction. The second one is the indicator for the wheel
acc, turn = id2vec(a, [3, 3])
        acc -= 1  # Mapping acc to [-1, 0, 1]
        turn -= 1  # Mapping turn to [-1, 0, 1]
# Calculate next state
nx = x + speed * np.cos(heading) * self.delta_t
ny = y + speed * np.sin(heading) * self.delta_t
nspeed = speed + acc * self.ACCELERATION * self.delta_t
nheading = heading + speed / self.CAR_LENGTH * \
np.tan(turn * self.TURN_ANGLE) * self.delta_t
# Bound values
nx = bound(nx, self.XMIN, self.XMAX)
ny = bound(ny, self.YMIN, self.YMAX)
nspeed = bound(nspeed, self.SPEEDMIN, self.SPEEDMAX)
nheading = wrap(nheading, self.HEADINGMIN, self.HEADINGMAX)
# Collision to wall => set the speed to zero
if nx == self.XMIN or nx == self.XMAX or ny == self.YMIN or ny == self.YMAX:
nspeed = 0
ns = np.array([nx, ny, nspeed, nheading])
self.state = ns.copy()
terminal = self.isTerminal()
r = self.GOAL_REWARD if terminal else self.STEP_REWARD
return r, ns, terminal, self.possibleActions()
def s0(self):
self.state = self.INIT_STATE.copy()
return self.state.copy(), self.isTerminal(), self.possibleActions()
def isTerminal(self):
return np.linalg.norm(self.state[0:2] - self.GOAL) < self.GOAL_RADIUS
def showDomain(self, a):
s = self.state
# Plot the car
x, y, speed, heading = s
car_xmin = x - self.REAR_WHEEL_RELATIVE_LOC
car_ymin = y - self.CAR_WIDTH / 2.
if self.domain_fig is None: # Need to initialize the figure
self.domain_fig = plt.figure()
# Goal
plt.gca(
).add_patch(
plt.Circle(
self.GOAL,
radius=self.GOAL_RADIUS,
color='g',
alpha=.4))
plt.xlim([self.XMIN, self.XMAX])
plt.ylim([self.YMIN, self.YMAX])
plt.gca().set_aspect('1')
# Car
if self.car_fig is not None:
plt.gca().patches.remove(self.car_fig)
self.car_fig = mpatches.Rectangle(
[car_xmin,
car_ymin],
self.CAR_LENGTH,
self.CAR_WIDTH,
alpha=.4)
rotation = mpl.transforms.Affine2D().rotate_deg_around(
x, y, heading * 180 / np.pi) + plt.gca().transData
self.car_fig.set_transform(rotation)
plt.gca().add_patch(self.car_fig)
plt.draw()
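# Illustrative rollout sketch (added; not part of the original rlpy file). It
# assumes the rlpy Domain base class provides possibleActions() and needs no
# extra constructor arguments, as the methods above already rely on.
def _random_rollout_example(n_steps=5, seed=0):
    """Take a few random actions in the RCCar domain and return the rewards."""
    rng = np.random.RandomState(seed)
    domain = RCCar(noise=0)
    s, terminal, p_actions = domain.s0()
    rewards = []
    for _ in range(n_steps):
        if terminal:
            break
        a = rng.choice(p_actions)
        r, s, terminal, p_actions = domain.step(a)
        rewards.append(r)
    return rewards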
| bsd-3-clause |
tlinnet/qNLS | plot_test.py | 1 | 1400 | import qNLS
import matplotlib.pyplot as plt
import nmrglue
import numpy
import copy
import sys
dic_ref, udic_ref, data_ref = qNLS.read_spectrum(file='test.ft2')
table = nmrglue.analysis.peakpick.pick(data=data_ref, pthres=100000., nthres=None, algorithm='connected', est_params=False, cluster=False, table=True)
uc_dim0 = nmrglue.pipe.make_uc(dic_ref, data_ref, dim=0)
uc_dim1 = nmrglue.pipe.make_uc(dic_ref, data_ref, dim=1)
y_axisppm = uc_dim0.unit(table['Y_AXIS'], "ppm")
x_axisppm = uc_dim1.unit(table['X_AXIS'], "ppm")
ax = qNLS.contour_plot(dic=dic_ref, udic=udic_ref, data=data_ref, contour_start=30000., contour_num=10, contour_factor=1.20, ppm=True, show=False, table=table)
dic_resi, udic_resi, data_resi = qNLS.read_spectrum(file='test_resi.ft2')
ax = qNLS.contour_plot(dic=dic_resi, udic=udic_resi, data=data_resi, contour_start=5000., contour_num=5, contour_factor=1.20, ppm=True, show=False, table=table)
int_arr = data_ref[table['Y_AXIS'].astype(int), table['X_AXIS'].astype(int)]
plt.close("all")
#plt.show()
dic_resi, udic_resi, data_resi = qNLS.read_spectrum(file='test_resi.ft2')
int_arr_resi = data_resi[table['Y_AXIS'].astype(int), table['X_AXIS'].astype(int)] + int_arr
qNLS.write_peak_list(filename="test.tab", x_axis_pts=table['X_AXIS'], y_axis_pts=table['Y_AXIS'], x_axis_ppm=x_axisppm, y_axis_ppm=y_axisppm, int_ref=int_arr, list_int_cov=[int_arr, int_arr_resi])
| gpl-3.0 |
gclenaghan/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 25 | 2252 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
lbishal/scikit-learn | sklearn/datasets/base.py | 22 | 22973 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
import os
import csv
import sys
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os.path import splitext
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
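# Illustrative sketch (added; not part of scikit-learn). Assuming a folder
# layout like the one described in the load_files docstring, the loaded texts
# can be vectorized before fitting a classifier; the container path is a
# placeholder.
def _load_files_example(container_path='container_folder'):
    from sklearn.feature_extraction.text import CountVectorizer
    bunch = load_files(container_path, encoding='utf-8', decode_error='replace')
    X = CountVectorizer().fit_transform(bunch.data)
    return X, bunch.target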
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_breast_cancer():
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'breast_cancer.csv')) as csv_file:
data_file = csv.reader(csv_file)
first_line = next(data_file)
n_samples = int(first_line[0])
n_features = int(first_line[1])
target_names = np.array(first_line[2:4])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for count, value in enumerate(data_file):
data[count] = np.asarray(value[:-1], dtype=np.float64)
target[count] = np.asarray(value[-1], dtype=np.int)
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float64)
target[i] = np.asarray(d[-1], dtype=np.float64)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
def _pkl_filepath(*args, **kwargs):
"""Ensure different filenames for Python 2 and Python 3 pickles
An object pickled under Python 3 cannot be loaded under Python 2.
    An object pickled under Python 2 can sometimes not be loaded
correctly under Python 3 because some Python 2 strings are decoded as
Python 3 strings which can be problematic for objects that use Python 2
strings as byte buffers for numerical data instead of "real" strings.
Therefore, dataset loaders in scikit-learn use different files for pickles
    managed by Python 2 and Python 3 in the same SCIKIT_LEARN_DATA folder so
as to avoid conflicts.
args[-1] is expected to be the ".pkl" filename. Under Python 3, a
    suffix is inserted before the extension. For example,
_pkl_filepath('/path/to/folder', 'filename.pkl') returns:
- /path/to/folder/filename.pkl under Python 2
- /path/to/folder/filename_py3.pkl under Python 3+
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
if sys.version_info[0] >= 3:
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
| bsd-3-clause |
ndhuang/python-lib | plot2Ddist.py | 1 | 9018 | import pylab
import numpy as np
import pymc
import matplotlib.patches
from mpl_toolkits.axes_grid1 import make_axes_locatable
import scipy.stats
def frac_inside_poly(x,y,polyxy):
"""Calculate the fraction of points x,y inside polygon polyxy.
polyxy -- list of x,y coordinates of vertices.
"""
xy = np.vstack([x,y]).transpose()
return float(sum(matplotlib.nxutils.points_inside_poly(xy, polyxy)))/len(x)
def fracs_inside_contours(x, y, contours):
"""Calculate the fraction of points x,y inside each contour level.
contours -- a matplotlib.contour.QuadContourSet
"""
fracs = []
for (icollection, collection) in enumerate(contours.collections):
path = collection.get_paths()[0]
pathxy = path.vertices
frac = frac_inside_poly(x,y,pathxy)
fracs.append(frac)
return fracs
def frac_label_contours(x, y, contours, format='%.3f'):
"""Label contours according to the fraction of points x,y inside.
"""
fracs = fracs_inside_contours(x,y,contours)
levels = contours.levels
labels = {}
for (level, frac) in zip(levels, fracs):
labels[level] = format % frac
contours.clabel(fmt=labels)
def contour_enclosing(x, y, fractions, xgrid, ygrid, zvals,
axes, nstart = 200,
*args, **kwargs):
"""Plot contours encompassing specified fractions of points x,y.
"""
print fractions
# Generate a large set of contours initially.
contours = axes.contour(xgrid, ygrid, zvals, nstart,
extend='both')
# Set up fracs and levs for interpolation.
levs = contours.levels
fracs = np.array(fracs_inside_contours(x,y,contours))
sortinds = np.argsort(fracs)
levs = levs[sortinds]
fracs = fracs[sortinds]
# Find the levels that give the specified fractions.
levels = scipy.interp(fractions, fracs, levs)
# Remove the old contours from the graph.
for coll in contours.collections:
coll.remove()
# Reset the contours
contours.__init__(axes, xgrid, ygrid, zvals, levels, *args, **kwargs)
return contours
def plot2Ddist(variables, axeslist=None, truevalues=None,
trimto=None, thin=1, histbinslist=[100, 100],
labels=None, scaleview=True,
plotscatter=True, plothists=True, plotcontours=True,
contourKDEthin=1, contourNGrid=100,
contourFractions=[0.6827, 0.9545, 0.9973],
labelcontours=True, returncontours=False,
scatterstyle={}, histstyle={}, contourstyle={}, **styleArgs):
"""Plot joint distribution of two variables, with marginal histograms.
The resulting graphic includes (at your discretion):
* a scatter plot of the 2D distribution of the two variables
* estimated density contours for the distribution
* marginal histograms for each variable
See plot2Ddist_example.py for an example:
> plot2Ddist([a, b], truevalues=[intercept, slope], **styleargs)
Notes
-----
The contour plotting can be quite slow for large samples because
of the gaussian kernel density estimation. Try passing a larger
value for contourKDEthin to speed it up.
Inputs
------
variables -- list-like of length 2
a list of two array-like or pymc.Variable objects. The lengths
of the arrays or variable traces should be equal.
axeslist -- list-like of length 3
a list of three Matplotlib Axes for: the joint plot, marginal
x histogram, and marginal y histogram, respectively.
truevalues -- list-like of length 2
a list of the true values for each variable
trimto -- int
plot only the last trimto elements of each variable
thin -- int
plot only every thin-th element of each variable
    histbinslist -- list-like of length 2
specify the bins (number or limits) for x and y marginal histograms.
labels -- list-like of two strings
the x and y axis labels
scaleview -- bool
whether to set the axes limits according to the plotted data
plotscatter, plothists, plotcontours -- bool
whether to plot the scatter, marginal histograms, and contours
scatterstyle, histstyle, contourstyle -- dict-like
additional keyword arguments for the plot, hist, or contour commands
contourKDEthin -- int
factor by which to thin the samples before calculating the
gaussian kernel density estimate for contouring
contourNGrid -- int
size of the grid to use (in each dimension) for the contour plotting
contourFractions -- list-like
countours are chosen to include the fractions of points specified here
labelcontours -- bool
whether to label the contours with the fraction of points enclosed
styleArgs --
leftover arguments are passed to both the plot and hist commands
"""
### Set up figures and axes. ###
if axeslist is None:
fig1 = pylab.figure(figsize=(6,6))
fig1.set_label('traces')
ax1 = pylab.gca()
divider = make_axes_locatable(ax1)
ax2 = divider.append_axes("top", 1.5, pad=0.0, sharex=ax1)
ax3 = divider.append_axes("right", 1.5, pad=0.0, sharey=ax1)
for tl in (ax2.get_xticklabels() + ax2.get_yticklabels() +
ax3.get_xticklabels() + ax3.get_yticklabels()):
tl.set_visible(False)
axeslist = (ax1, ax2, ax3)
elif (len(axeslist) == 1):
ax1 = axeslist[0]
divider = make_axes_locatable(ax1)
ax2 = divider.append_axes("top", 1.5, pad=0.0, sharex=ax1)
ax3 = divider.append_axes("right", 1.5, pad=0.0, sharey=ax1)
for tl in (ax2.get_xticklabels() + ax2.get_yticklabels() +
ax3.get_xticklabels() + ax3.get_yticklabels()):
tl.set_visible(False)
axeslist = (ax1, ax2, ax3)
else:
ax1, ax2, ax3 = axeslist
# Thin and trim variables.
if labels is None:
passedlabels = False
labels = [None, None]
else:
passedlabels = True
for (ivar, variable) in enumerate(variables):
# Get the trace if this is a pymc.Variable object.
if isinstance(variable, pymc.Variable):
variables[ivar] = variable.trace()
if hasattr(variable, '__name__') and not passedlabels:
labels[ivar] = variable.__name__
if trimto is None:
trimto = len(variables[0])
x = variables[0][-trimto::thin]
y = variables[1][-trimto::thin]
### Plot the variables. ###
# Plot 2D scatter of variables.
if plotscatter:
        style = {'ls':'', 'marker':',', 'color':'r', 'alpha':0.5}
style.update(styleArgs)
style.update(scatterstyle)
ax1.plot(x, y, **style)
if plotcontours:
xkde = variables[0][-trimto::contourKDEthin]
ykde = variables[1][-trimto::contourKDEthin]
# Inspired by Abraham Flaxman's https://gist.github.com/626689
style = {'linewidths':2.0, 'alpha':0.75,
#'cmap':matplotlib.cm.Greys,
'zorder':10}
style.update(styleArgs)
style.update(contourstyle)
if 'color' in style:
style['colors'] = style['color']
gkde = scipy.stats.gaussian_kde([xkde,ykde])
xgrid, ygrid = np.mgrid[min(x):max(x):contourNGrid * 1j,
min(y):max(y):contourNGrid * 1j]
zvals = np.array(gkde.evaluate([xgrid.flatten(),
ygrid.flatten()])
).reshape(xgrid.shape)
contours = contour_enclosing(x, y, contourFractions,
xgrid, ygrid, zvals,
ax1, **style)
# Plot marginal histograms.
if plothists:
style = {'normed':True}
style.update(styleArgs)
style.update(histstyle)
ax2.hist(x, histbinslist[0], **style)
ax3.hist(y, histbinslist[1], orientation='horizontal', **style)
# Plot lines for the true values.
if truevalues is not None:
ax1.axvline(x=truevalues[0], ls=':', c='k')
ax1.axhline(y=truevalues[1], ls=':', c='k')
ax2.axvline(x=truevalues[0], ls=':', c='k')
ax3.axhline(y=truevalues[1], ls=':', c='k')
if scaleview:
ax2.relim()
ax3.relim()
ax1.relim()
ax2.autoscale_view(tight=True)
ax3.autoscale_view(tight=True)
ax1.autoscale_view(tight=True)
ax2.set_ylim(bottom=0)
ax3.set_xlim(left=0)
if labels[0] is not None:
ax1.set_xlabel(labels[0])
if labels[1] is not None:
ax1.set_ylabel(labels[1])
if plotcontours and labelcontours:
frac_label_contours(x, y, contours)
if plotcontours and returncontours:
return axeslist, contours
else:
return axeslist
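# Hedged usage sketch (added; plot2Ddist_example.py mentioned in the docstring
# is the full example). The synthetic, correlated samples below are assumptions
# made only to exercise the plotting code.
def _plot2Ddist_demo(n_samples=5000, seed=0):
    np.random.seed(seed)
    slope = np.random.normal(2.0, 0.1, n_samples)
    intercept = 0.5 * slope + np.random.normal(0.0, 0.05, n_samples)
    return plot2Ddist([intercept, slope], labels=['intercept', 'slope'],
                      contourKDEthin=10)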
| mit |
metalpen1984/SciTool_Py | gridtransU.py | 1 | 15622 | #!/usr/bin/python
#Purpose: 1. To convert grid data between the following formats:
# .sa .xyz .asc .pfb
# (simple ascii, xyz, ascii grid, parflow binary)
# 2. To plot a simple figure from a single output file.
#ChangeLog: 20150429: Changed the reading method of readpfb to make it read faster.
#
import re,math
import struct
plot_funk=1
try:
import numpy as numpy
except ImportError:
print("You no install numpy? ")
print("You could not use the plot function of this script until you install Numpy")
plot_funk=0
try:
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
from pylab import *
import matplotlib.animation as animation
except ImportError:
print("You could not use the plot function of this script until you install Matplotlib")
plot_funk=0
# -----
# Strip the blank while readline
# -----
#Read and Strip the Blanks
#Default re_pattern : \s
# if len(re_pattern) == 0:
# str_arr=re.split('\s',fdata.readline())
# elif len(re_pattern) == 1:
# str_arr=re.split(re_pattern,fdata.readline())
# else:
# print("Wrong for re_pattern, Bug#001")
# new_arr=[]
# for s in str_arr:
# if s =="":
# pass
# else:
# new_arr.append(s)
# return new_arr
def stripblnk(arr,*num_typ):
new_arr=[]
for i in arr:
if i == "":
pass
else:
if num_typ[0] == 'int':
new_arr.append(int(i))
elif num_typ[0] == 'float':
new_arr.append(float(i))
elif num_typ[0] == '':
new_arr.append(i)
else:
print("WRONG num_typ!")
return new_arr
def tryopen(sourcefile,ag):
try:
opf=open(sourcefile,ag)
return opf
except :
print("No such file.")
return "error"
def checkformat(sourcefile):
fmtt=re.split('\.',sourcefile)
fmt=fmtt[len(fmtt)-1]
return fmt
# -----
# Read .pfb
# -----
def readpfb(sourcefile, if_silence = True):
opf = tryopen(sourcefile,'rb')
if if_silence == False:
print("reading source file {0:s}".format(sourcefile))
t1=struct.unpack('>ddd',opf.read(24))
tn=struct.unpack('>iii',opf.read(12))
td=struct.unpack('>ddd',opf.read(24))
tns=struct.unpack('>i',opf.read(4))
x1,y1,z1=t1
nx,ny,nz=tn
dx,dy,dz=td
ns=tns[0]
result_arr=[[[ 0 for ii in range(nx) ] for jj in range(ny)] for kk in range(nz)]
for isub in range(0,ns):
ix,iy,iz,nnx,nny,nnz,rx,ry,rz=struct.unpack('>9i',opf.read(36))
tmp_total = nnx * nny * nnz
tvalue = struct.unpack('>{0:d}d'.format(tmp_total), opf.read(8*tmp_total))
for k in range(nnz):
for j in range(nny):
for i in range(nnx):
result_arr[k+iz][j+iy][i+ix]=tvalue[ k*(nny*nnx) + j*nnx + i ]
opf.close()
if if_silence == False:
print("Completed reading pfb format from {0}".format(sourcefile))
return result_arr,nx,ny,nz,dx,dy,dz
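# -----
# Illustrative sketch (added): readpfb returns the values indexed as [z][y][x].
# The file name below is a placeholder assumption.
# -----
def _readpfb_indexing_example(pfb_file='press.pfb'):
    arr, nx, ny, nz, dx, dy, dz = readpfb(pfb_file, if_silence=False)
    # value of the top layer at the first row and first column
    return arr[nz - 1][0][0]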
# -----
# Read .sa
# -----
def readsa(sourcefile):
print("reading source file {0:s}".format(sourcefile))
result_arr=[]
opf = tryopen(sourcefile,'r')
headt=re.split('\s',opf.readline().strip())
head=stripblnk(headt,'int')
nx=int(head[0])
ny=int(head[1])
nz=int(head[2])
for j in range(0,ny):
tmp=[]
for i in range(0,nx):
ans=re.split('\s',opf.readline().strip())
tmp.append(float(ans[0]))
result_arr.append(tmp)
print("Completed reading sa format from {0}".format(sourcefile))
return result_arr,nx,ny,nz
# -----
# Read .dat
# -----
def readdat(sourcefile, str_null="noData", num_null=-999.999, num_pos=[]):
opf = tryopen(sourcefile,'r')
opfchk = tryopen(sourcefile,'r')
print("reading source file {0:s}".format(sourcefile))
chk_lines = opfchk.readlines()
num_totallines = len(chk_lines)
ncols = 0
num_notnum = 0
for n in range(num_totallines):
line_in = chk_lines[n]
#print line_in
        c_first = re.findall(".",line_in.strip())
if c_first[0] == "#":
num_notnum += 1
else:
ncols = len( re.split("\s",line_in.strip()) )
break
if ncols == 0:
print("something wrong with the input file! (all comments?)")
else:
del opfchk
nrows=num_totallines - num_notnum
result_arr=[[num_null for j in range(nrows)] for i in range(ncols)]
for j in range(0,nrows):
# chk if comment
line_in = opf.readline()
c_first = re.findall(".",line_in.strip())[0]
if c_first == "#":
pass
else:
arr_in = re.split("\s",line_in.strip())
if len(num_pos)==0:
for i in range(ncols):
chk_val = arr_in[i]
if chk_val == str_null:
result_arr[i][j] = num_null
else:
                            result_arr[i][j] = float(chk_val)
else:
for i in num_pos:
chk_val = arr_in[i]
result_arr[i][j] = float(chk_val)
return result_arr
# -----
# Read .csv
# -----
def readcsv(sourcefile, str_null="noData", num_null=-999.999, if_allnum=False):
opf = tryopen(sourcefile,'r')
opfchk = tryopen(sourcefile,'r')
print("reading source file {0:s}".format(sourcefile))
chk_lines = opfchk.readlines()
num_totallines = len(chk_lines)
ncols = 0
num_notnum = 0
for n in range(num_totallines):
line_in = chk_lines[n]
#print line_in
c_first = re.findall(".",line_in.strip())
if c_first[0] == "#":
num_notnum += 1
else:
ncols = len( re.split(",",line_in.strip()) )
break
if ncols == 0:
print("something wrong with the input file! (all comments?)")
else:
del opfchk
nrows=num_totallines - num_notnum
result_arr=[[num_null for j in range(nrows)] for i in range(ncols)]
for j in range(0,nrows + num_notnum):
# chk if comment
line_in = opf.readline()
c_first = re.findall(".",line_in.strip())[0]
if c_first == "#":
pass
else:
arr_in = re.split(",",line_in.strip())
for i in range(ncols):
chk_val = arr_in[i]
if chk_val == str_null:
result_arr[i][j-num_notnum] = num_null
else:
if if_allnum:
result_arr[i][j-num_notnum] = float(chk_val)
else:
result_arr[i][j-num_notnum] = chk_val
print("read ncol:{0:d}, nrow:{1:d}".format(ncols,nrows))
return result_arr
# -----
# Read .asc (ascii grid from ESRI)
# -----
def readascgrid(sourcefile):
opf = tryopen(sourcefile,'r')
print("reading source file {0:s}".format(sourcefile))
ncols=int(re.split('\s',opf.readline().strip())[1])
nrows=int(re.split('\s',opf.readline().strip())[1])
xllcorner=float(re.split('\s',opf.readline().strip())[1])
yllcorner=float(re.split('\s',opf.readline().strip())[1])
cellsize=float(re.split('\s',opf.readline().strip())[1])
nodata_v=float(re.split('\s',opf.readline().strip())[1])
result_arr=[]
for j in range(0,nrows):
        valuet=stripblnk(re.split('\s',opf.readline().strip()),'float')
result_arr.append(valuet)
print("Completed reading ascii-grid format from {0}".format(sourcefile))
return result_arr,ncols,nrows,xllcorner,yllcorner,cellsize,nodata_v
# -----
# Read .xyz
# -----
def readxyz(sourcefile,*nxny):
chk = tryopen(sourcefile,'r')
print("reading source file {0:s}".format(sourcefile))
opf = tryopen(sourcefile,'r')
result_arr=[]
if len(nxny) == 2:
print("Specific nx and ny is indicated.")
ncol=nxny[0]
nrow=nxny[1]
elif len(nxny) == 0:
print("Checking the nx and ny")
count=1
k=chk.readlines()
start_col=float(re.split('\s',k[0].strip())[0])
check = -9999
while check != 0.0:
check_col=float(re.split('\s',k[count].strip())[0])
check=check_col-start_col
if check ==0.0 :
ncol=count
else:
count=count+1
nrow=len(k)/ncol
for j in range(0,nrow):
valuex=[]
for i in range(0,ncol):
tline=re.split('\s',opf.readline().strip())
line=stripblnk(tline,'float')
valuex.append(line[2])
if i == 0 and j == 0:
xll=float(line[0])
print('xll: {0}'.format(xll))
if i == 1 and j == 0:
refx=float(line[0])
print('refx: {0}'.format(refx))
if i == 0 and j == 1:
refy=float(line[1])
print('refy: {0}'.format(refy))
if i == ncol-1 and j == nrow-1:
yll=float(line[1])
print('yll: {0}'.format(yll))
result_arr.append(valuex)
#dx=xll-refx
#dy=refy-yll
print("Completed reading ascii-grid format from {0}".format(sourcefile))
#print result_arr
return result_arr,ncol,nrow,xll,yll,#dx,dy
# -----
# Read Custom 1 col raster
# -----
def read1col(sourcefile,nx,ny,nz,*skiplines):
print("reading source file {0:s}".format(sourcefile))
opf = tryopen(sourcefile,'r')
result_arr=[]
if len(skiplines) == 0:
pass
else:
        for m in range(0,skiplines[0]):
opf.readline()
for j in range(0,ny):
tmp=[]
for i in range(0,nx):
ans=re.split('\s',opf.readline().strip())
tmp.append(float(ans[0]))
result_arr.append(tmp)
print("Completed reading sa format from {0}".format(sourcefile))
return result_arr,nx,ny,nz
# -----
# Read Custom 2d grid raster
# -----
def read2d(sourcefile,nx,ny,num_typ,*skiplines):
print("reading source file {0:s}".format(sourcefile))
opf = tryopen(sourcefile,'r')
result_arr=[]
if len(skiplines) == 0:
pass
else:
for m in range(0,skiplines[0]):
opf.readline()
for j in range(0,ny):
ans=re.split('\s',opf.readline().strip())
t_arr=stripblnk(ans,num_typ)
result_arr.append(t_arr)
return result_arr,nx,ny
# -----
# Write from 2D array to 1D array
# -----
# -----
# Write from 2D array to 2D array
# -----
# -----
# Write .xyz
# -----
def writexyz(write_file_name,input_arr,ncols,nrows,xllco,yllco,cellsize,nodata_value):
    wtf=open(write_file_name,'w')
    for j in range(nrows):
        for i in range(ncols):
            wtf.write("{0} {1} {2}\n".format(xllco+i*cellsize,yllco+nrows*cellsize-j*cellsize,input_arr[j][i]))
wtf.close()
# -----
# Write .pfb
# -----
def writepfb(write_file_name,input_arr,nx,ny,nz,dx=0,dy=0,dz=0,x=0,y=0,z=0,ns=1,nodata_value=-999.999):
wtf=open(write_file_name,"w")
wtf.write(struct.pack('>3d',x,y,z))
wtf.write(struct.pack('>3i',nx,ny,nz))
wtf.write(struct.pack('>3d',dx,dy,dz))
wtf.write(struct.pack('>1i',ns))
for isub in range(0,ns):
iz,iy,ix=int(z),int(y),int(x)
nnz,nny,nnx=int(nz),int(ny),int(nx)
wtf.write(struct.pack('>3i',0,0,0))
wtf.write(struct.pack('>3i',nx,ny,nz))
        wtf.write(struct.pack('>3i',0,0,0))  # subgrid rx, ry, rz (read and ignored by readpfb above)
for i in range(iz,iz+nnz):
for j in range(iy,iy+nny):
for k in range(ix,ix+nnx):
wtf.write(struct.pack('>d',input_arr[i][j][k]))
wtf.close()
# -----
# Write .sa
# -----
def writesa(write_file_name,input_arr,nz,ny,nx):
wtf=open(write_file_name,"w")
wtf.write('{0} {1} {2}\n'.format(nx,ny,nz))
for k in range(0,nz):
for j in range(0,ny):
for i in range(0,nx):
wtf.write(str(input_arr[j][i]) + '\n' )
wtf.close()
# -----
# Write .asc
# -----
def writeasc(write_file_name,input_arr,ncols,nrows,xllco,yllco,cellsize,nodata_v):
wtf=open(write_file_name,'w')
wtf.write("ncols {0}\n".format(ncols))
wtf.write("nrows {0}\n".format(nrows))
wtf.write("xllcorner {0}\n".format(xllco))
wtf.write("yllcorner {0}\n".format(yllco))
wtf.write("cellsize {0}\n".format(cellsize))
wtf.write("NODATA_value {0}\n".format(nodata_v))
for j in range(0,nrows):
        for i in range(0,ncols):
            wtf.write("{0} ".format(input_arr[j][i]))
wtf.write("\n")
wtf.close()
# -----
# Write .csv
# -----
def writecsv(file_name, arr_input, arr_head=[]):
wtf = open(file_name, 'w')
if len(arr_head) != 0:
wtf.write("#")
for h in range(len(arr_head)):
wtf.write("{0:s}".format(arr_head[h]))
wtf.write("\n")
num_vars = len(arr_input)
num_length = len(arr_input[0])
for l in range(num_length):
for v in range(num_vars):
if v == 0:
wtf.write("{0:8.5f}".format(arr_input[v][l]))
else:
wtf.write(",{0:8.5f}".format(arr_input[v][l]))
wtf.write("\n")
print("Finishing writing out {0:s}".format(file_name))
wtf.close()
# -----
# Plot in default_im
# -----
def im_subplots(title,ax,figx,ncols,nrows,array,*inver):
figx=plt.figure()
ax=figx.add_subplot(111)
cax=ax.imshow(array)
#cax=ax.imshow('array[0][layer],vmin=%f,vmax=%f' % (float(vmin),float(vmax)))
ax.set_title(title)
ax.set_xlim((-0.5,ncols-0.5))
if len(inver) == 0:
ax.set_ylim((-0.5,nrows-0.5))
else:
ax.set_ylim((nrows-0.5,-0.5))
cbar= figx.colorbar(cax,format='%.5f')
# -----
# Plot .pfb in im
# -----
def pfb_im_subplots(title,ax,figx,array,ncols,nrows,layer,*inver):
figx=plt.figure()
#ax=figx.add_subplot('%d%d%d'%(col,row,pos))
ax=figx.add_subplot(111)
v_max = numpy.amax(array[layer])
v_min = v_max * -1
if v_max == v_min: v_max=v_min+0.5
cax=ax.imshow(array[layer],vmin=v_min,vmax=v_max,cmap='bwr',interpolation='nearest')
ax.set_title('File:{0:s}, Layer:{1:d}'.format(title,layer))
ax.set_xlim((-0.5,ncols-0.5))
    if len(inver) != 0 and inver[0]:
ax.set_ylim((nrows-0.5,-0.5))
else:
ax.set_ylim((-0.5,nrows-0.5))
cbar= figx.colorbar(cax,format='%.5f')
numrows, numcols = array[layer].shape
def format_coord(x, y):
col = int(x+0.5)
row = int(y+0.5)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = array[layer][row][col]
return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
else:
return 'x=%1.4f, y=%1.4f'%(x, y)
ax.format_coord = format_coord
# -----
# Plot in default_line
# -----
def line_subplots(title,ax,figx,ncols,nrows,array,*inver):
figx=plt.figure()
ax=figx.add_subplot(111)
cax=ax.plot(array[ncols])
#cax=ax.imshow('array[0][layer],vmin=%f,vmax=%f' % (float(vmin),float(vmax)))
ax.set_title(title)
# ax.set_xlim((-0.5,ncols-0.5))
# if len(inver) == 0:
# ax.set_ylim((-0.5,nrows-0.5))
# else:
# ax.set_ylim((nrows-0.5,-0.5))
# cbar= figx.colorbar(cax,format='%.5f')
# -----
# Plot .pfb in im
# -----
def pfb_line_subplots(title,ax,figx,array,ncols,nrows,layer,*inver):
figx=plt.figure()
#ax=figx.add_subplot('%d%d%d'%(col,row,pos))
ax=figx.add_subplot(111)
ax.plot(array[layer][nrows])
ax.set_title('File:{0:s}, Layer:{1:d}'.format(title,layer))
#cbar= figx.colorbar(cax,format='%.5f')
# -----
# Chunk the value from pfb file
# -----
def pfb_chunk3d(pfba,fixplane,layer,ncols,nrows,nlayers):
#Must call the array[0] from readpfb
fp=fixplane
nx=ncols
ny=nrows
nz=nlayers
if fp == "x":
print("Chunk from X plane, layer: {0}".format(layer))
value=zeros((nz,ny))
for j in range(nz):
for i in range(ny):
value[j][i]=pfba[j][i][layer]
return value,ny,nz
elif fp =="y":
print("Chunk from Y plane, layer: {0}".format(layer))
value=zeros((nz,nx))
for j in range(nz):
for i in range(nx):
value[j][i]=pfba[j][layer][i]
return value,nx,nz
elif fp =="z":
print("Chunk from Z plane, layer: {0}".format(layer))
value=zeros((ny,nx))
for j in range(ny):
for i in range(nx):
value[j][i]=pfba[layer][j][i]
return value,nx,ny
else:
print("Wrong fix plane, \"x\", \"y\" or \"z\" only")
print("Get chunk at {0}-plane,{1} layer".format(fixplane,layer))
# -----
# Persing arguments
# -----
| lgpl-3.0 |
EtiCui/Msc-UdeS | dataAnalysis/endtoend_distance.py | 1 | 3264 | #!/usr/bin/python
"""
These function can be used to calculate the average end to end distances of a backbone from a lammmps output.
Usage:
# dump_dataframe must be in pythonpath or working directory
from endtoend_distance import rf
rf,rf_std = rf(first_frame=-1000, last_frame=-1, trajectory_step=10,atoms_per_polymer=184, number_of_chains=100)
Requirement:
numpy
pandas
dump_dataframe.py
scipy
Limitations:
Coordinates must be unwrapped (ex:xu,yu,zu)
Each dump must be a file
TODO:
Function to read a trajectory from a single file
"""
from dump_dataframe import read_dump
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from glob import glob
def endtoend(filename, atoms_per_polymer, number_of_chains):
"""
Function to calculate the end to end distances of each polymer chains from a dump.
Args:
----
filename(string): Filename of the dump
    atoms_per_polymer(int): The number of particles/atoms in a single chain
number_of_chains(int): Number of chains in the system
Returns:
----
    endtoend_dists(array): Numpy array with the end-to-end distance of each chain
"""
# Read the dump, coordinates must be unwrapped
dump = read_dump(filename, wrap=False)
# Select only the useful columns
wanted_columns = ["xu", "yu", "zu"]
rf_df = dump["atom_df"][wanted_columns]
# Create an empty array which will contains the distances
endtoend_dists = np.zeros(number_of_chains)
i = 0
while i < number_of_chains:
        # Calculate the distance between the first and the last atoms in the
# backbone
endtoend_dists[i] = pdist(
rf_df.loc[[1 + atoms_per_polymer * i, atoms_per_polymer + atoms_per_polymer * i]])
i += 1
return endtoend_dists
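# Illustrative call (the dump filename here is hypothetical):
#   dists = endtoend("polymer.dump.100000", atoms_per_polymer=184,
#                    number_of_chains=100)
#   dists.mean()  # average end-to-end distance for that single frame
# The index arithmetic above assumes the dump dataframe is indexed by 1-based
# atom ids, so chain i spans ids 1 + N*i ... N + N*i for N atoms per chain.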
def rf(first_frame=-1000, last_frame=-1, trajectory_step=10,atoms_per_polymer=184, number_of_chains=100):
"""
Function to calculate the Rf of a lammps trajectory.
Args:
----
first_frame(int): The first frame desired in the trajectory
last_frame(int): The frame to stop
trajectory_step(int): calculate only for each # of files
atoms_per_polymer(int): The number of atoms in the polymer chain
number_of_chains(int): The number of chains in the system
Returns:
----
    Rfmean(float): The average end-to-end distance over the trajectory
Rfstd(float): The standard deviation of the Rf
"""
# List of all the dump in the trajectory
complete_trajectory = glob("*dump*")
# sort the list according to the number in the filename
    complete_trajectory.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
# consider only the desired frames
desired_trajectory = complete_trajectory[first_frame:last_frame:trajectory_step]
    # create an empty numpy array to contain the end-to-end distances for each chain (columns)
    # at each frame of the trajectory (rows)
rf = np.zeros((len(desired_trajectory),number_of_chains))
i=0
# for each file in the trajectory
for f in desired_trajectory:
#calculate the end to end distances for each chain
rf[i] = endtoend(f, atoms_per_polymer, number_of_chains)
i+=1
    # return the mean end-to-end distance and its standard deviation
return rf.mean(),rf.std()
| mit |
NelisVerhoef/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as those of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
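# Sanity check: with soft voting, the combined probabilities should equal the
# weight-normalized average of the individual estimators' probabilities. The
# weights [1, 1, 5] apply positionally to the `estimators` list, which matches
# the (clf1, clf2, clf3) order used to build `probas` above.
manual_avg = (1 * probas[0][0] + 1 * probas[1][0] + 5 * probas[2][0]) / 7.0
assert np.allclose(manual_avg, probas[3][0])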
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
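# The hstack above pads the three individual classifiers' probabilities with a
# zero in the fourth slot, while p3/p4 below fill only that slot, so the two
# colour groups occupy disjoint bar positions.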
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
jzt5132/scikit-learn | benchmarks/bench_plot_svd.py | 325 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but has a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
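# Note on the randomized method benchmarked above: randomized_svd(X, rank,
# n_iter=k) returns a rank-truncated (U, s, Vt); extra power iterations
# (n_iter > 0) improve accuracy when the singular values decay slowly, at the
# cost of the additional passes over X that this benchmark times against the
# exact scipy.linalg.svd.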
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
    for c, (label, timings) in zip('rbg', sorted(results.items())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
apache/arrow | python/pyarrow/tests/test_feather.py | 4 | 22981 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import os
import sys
import tempfile
import pytest
import hypothesis as h
import hypothesis.strategies as st
import numpy as np
import pyarrow as pa
import pyarrow.tests.strategies as past
from pyarrow.feather import (read_feather, write_feather, read_table,
FeatherDataset)
try:
from pandas.testing import assert_frame_equal
import pandas as pd
import pyarrow.pandas_compat
except ImportError:
pass
@pytest.fixture(scope='module')
def datadir(base_datadir):
return base_datadir / 'feather'
def random_path(prefix='feather_'):
return tempfile.mktemp(prefix=prefix)
@pytest.fixture(scope="module", params=[1, 2])
def version(request):
yield request.param
@pytest.fixture(scope="module", params=[None, "uncompressed", "lz4", "zstd"])
def compression(request):
yield request.param
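# These module-scoped fixtures parametrize the tests below over Feather file
# versions (V1 and V2) and, where used, over the supported compression codecs.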
TEST_FILES = None
def setup_module(module):
global TEST_FILES
TEST_FILES = []
def teardown_module(module):
for path in TEST_FILES:
try:
os.remove(path)
except os.error:
pass
@pytest.mark.pandas
def test_file_not_exist():
with pytest.raises(pa.ArrowIOError):
read_feather('test_invalid_file')
def _check_pandas_roundtrip(df, expected=None, path=None,
columns=None, use_threads=False,
version=None, compression=None,
compression_level=None):
if path is None:
path = random_path()
TEST_FILES.append(path)
write_feather(df, path, compression=compression,
compression_level=compression_level, version=version)
if not os.path.exists(path):
raise Exception('file not written')
result = read_feather(path, columns, use_threads=use_threads)
if expected is None:
expected = df
assert_frame_equal(result, expected)
def _check_arrow_roundtrip(table, path=None, compression=None):
if path is None:
path = random_path()
TEST_FILES.append(path)
write_feather(table, path, compression=compression)
if not os.path.exists(path):
raise Exception('file not written')
result = read_table(path)
assert result.equals(table)
def _assert_error_on_write(df, exc, path=None, version=2):
# check that we are raising the exception
# on writing
if path is None:
path = random_path()
TEST_FILES.append(path)
def f():
write_feather(df, path, version=version)
pytest.raises(exc, f)
def test_dataset(version):
num_values = (100, 100)
num_files = 5
paths = [random_path() for i in range(num_files)]
data = {
"col_" + str(i): np.random.randn(num_values[0])
for i in range(num_values[1])
}
table = pa.table(data)
TEST_FILES.extend(paths)
for index, path in enumerate(paths):
rows = (
index * (num_values[0] // num_files),
(index + 1) * (num_values[0] // num_files),
)
write_feather(table[rows[0]: rows[1]], path, version=version)
data = FeatherDataset(paths).read_table()
assert data.equals(table)
@pytest.mark.pandas
def test_float_no_nulls(version):
data = {}
numpy_dtypes = ['f4', 'f8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randn(num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_read_table(version):
num_values = (100, 100)
path = random_path()
TEST_FILES.append(path)
values = np.random.randint(0, 100, size=num_values)
columns = ['col_' + str(i) for i in range(100)]
table = pa.Table.from_arrays(values, columns)
write_feather(table, path, version=version)
result = read_table(path)
assert result.equals(table)
# Test without memory mapping
result = read_table(path, memory_map=False)
assert result.equals(table)
result = read_feather(path, memory_map=False)
assert_frame_equal(table.to_pandas(), result)
@pytest.mark.pandas
def test_float_nulls(version):
num_values = 100
path = random_path()
TEST_FILES.append(path)
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = ['f4', 'f8']
expected_cols = []
arrays = []
for name in dtypes:
values = np.random.randn(num_values).astype(name)
arrays.append(pa.array(values, mask=null_mask))
values[null_mask] = np.nan
expected_cols.append(values)
table = pa.table(arrays, names=dtypes)
_check_arrow_roundtrip(table)
df = table.to_pandas()
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_integer_no_nulls(version):
data, arr = {}, []
numpy_dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
arr.append(values.astype(dtype))
df = pd.DataFrame(data)
_check_pandas_roundtrip(df, version=version)
table = pa.table(arr, names=numpy_dtypes)
_check_arrow_roundtrip(table)
@pytest.mark.pandas
def test_platform_numpy_integers(version):
data = {}
numpy_dtypes = ['longlong']
num_values = 100
for dtype in numpy_dtypes:
values = np.random.randint(0, 100, size=num_values)
data[dtype] = values.astype(dtype)
df = pd.DataFrame(data)
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_integer_with_nulls(version):
# pandas requires upcast to float dtype
path = random_path()
TEST_FILES.append(path)
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
arrays = []
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arrays.append(pa.array(values, mask=null_mask))
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
table = pa.table(arrays, names=int_dtypes)
_check_arrow_roundtrip(table)
df = table.to_pandas()
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_boolean_no_nulls(version):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_boolean_nulls(version):
# pandas requires upcast to object dtype
path = random_path()
TEST_FILES.append(path)
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
table = pa.table([pa.array(values, mask=mask)], names=['bools'])
_check_arrow_roundtrip(table)
df = table.to_pandas()
_check_pandas_roundtrip(df, version=version)
def test_buffer_bounds_error(version):
# ARROW-1676
path = random_path()
TEST_FILES.append(path)
for i in range(16, 256):
table = pa.Table.from_arrays(
[pa.array([None] + list(range(i)), type=pa.float64())],
names=["arr"]
)
_check_arrow_roundtrip(table)
def test_boolean_object_nulls(version):
repeats = 100
table = pa.Table.from_arrays(
[np.array([False, None, True] * repeats, dtype=object)],
names=["arr"]
)
_check_arrow_roundtrip(table)
@pytest.mark.pandas
def test_delete_partial_file_on_error(version):
if sys.platform == 'win32':
pytest.skip('Windows hangs on to file handle for some reason')
class CustomClass:
pass
# strings will fail
df = pd.DataFrame(
{
'numbers': range(5),
'strings': [b'foo', None, 'bar', CustomClass(), np.nan]},
columns=['numbers', 'strings'])
path = random_path()
try:
write_feather(df, path, version=version)
except Exception:
pass
assert not os.path.exists(path)
@pytest.mark.pandas
def test_strings(version):
repeats = 1000
# Mixed bytes, unicode, strings coerced to binary
values = [b'foo', None, 'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
ex_values = [b'foo', None, b'bar', b'qux', np.nan]
expected = pd.DataFrame({'strings': ex_values * repeats})
_check_pandas_roundtrip(df, expected, version=version)
# embedded nulls are ok
values = ['foo', None, 'bar', 'qux', None]
df = pd.DataFrame({'strings': values * repeats})
expected = pd.DataFrame({'strings': values * repeats})
_check_pandas_roundtrip(df, expected, version=version)
values = ['foo', None, 'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
expected = pd.DataFrame({'strings': values * repeats})
_check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
def test_empty_strings(version):
df = pd.DataFrame({'strings': [''] * 10})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_all_none(version):
df = pd.DataFrame({'all_none': [None] * 10})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_all_null_category(version):
# ARROW-1188
df = pd.DataFrame({"A": (1, 2, 3), "B": (None, None, None)})
df = df.assign(B=df.B.astype("category"))
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_multithreaded_read(version):
data = {'c{}'.format(i): [''] * 10
for i in range(100)}
df = pd.DataFrame(data)
_check_pandas_roundtrip(df, use_threads=True, version=version)
@pytest.mark.pandas
def test_nan_as_null(version):
# Create a nan that is not numpy.nan
values = np.array(['foo', np.nan, np.nan * 2, 'bar'] * 10)
df = pd.DataFrame({'strings': values})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_category(version):
repeats = 1000
values = ['foo', None, 'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values * repeats})
df['strings'] = df['strings'].astype('category')
values = ['foo', None, 'bar', 'qux', None]
expected = pd.DataFrame({'strings': pd.Categorical(values * repeats)})
_check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
def test_timestamp(version):
df = pd.DataFrame({'naive': pd.date_range('2016-03-28', periods=10)})
df['with_tz'] = (df.naive.dt.tz_localize('utc')
.dt.tz_convert('America/Los_Angeles'))
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_timestamp_with_nulls(version):
df = pd.DataFrame({'test': [pd.Timestamp(2016, 1, 1),
None,
pd.Timestamp(2016, 1, 3)]})
df['with_tz'] = df.test.dt.tz_localize('utc')
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
@pytest.mark.xfail(reason="not supported", raises=TypeError)
def test_timedelta_with_nulls_v1():
df = pd.DataFrame({'test': [pd.Timedelta('1 day'),
None,
pd.Timedelta('3 day')]})
_check_pandas_roundtrip(df, version=1)
@pytest.mark.pandas
def test_timedelta_with_nulls():
df = pd.DataFrame({'test': [pd.Timedelta('1 day'),
None,
pd.Timedelta('3 day')]})
_check_pandas_roundtrip(df, version=2)
@pytest.mark.pandas
def test_out_of_float64_timestamp_with_nulls(version):
df = pd.DataFrame(
{'test': pd.DatetimeIndex([1451606400000000001,
None, 14516064000030405])})
df['with_tz'] = df.test.dt.tz_localize('utc')
_check_pandas_roundtrip(df, version=version)
@pytest.mark.pandas
def test_non_string_columns(version):
df = pd.DataFrame({0: [1, 2, 3, 4],
1: [True, False, True, False]})
expected = df.rename(columns=str)
_check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
@pytest.mark.skipif(not os.path.supports_unicode_filenames,
reason='unicode filenames not supported')
def test_unicode_filename(version):
# GH #209
name = (b'Besa_Kavaj\xc3\xab.feather').decode('utf-8')
df = pd.DataFrame({'foo': [1, 2, 3, 4]})
_check_pandas_roundtrip(df, path=random_path(prefix=name),
version=version)
@pytest.mark.pandas
def test_read_columns(version):
df = pd.DataFrame({
'foo': [1, 2, 3, 4],
'boo': [5, 6, 7, 8],
'woo': [1, 3, 5, 7]
})
expected = df[['boo', 'woo']]
_check_pandas_roundtrip(df, expected, version=version,
columns=['boo', 'woo'])
def test_overwritten_file(version):
path = random_path()
TEST_FILES.append(path)
num_values = 100
np.random.seed(0)
values = np.random.randint(0, 10, size=num_values)
table = pa.table({'ints': values})
write_feather(table, path)
table = pa.table({'more_ints': values[0:num_values//2]})
_check_arrow_roundtrip(table, path=path)
@pytest.mark.pandas
def test_filelike_objects(version):
buf = io.BytesIO()
# the copy makes it non-strided
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=['a', 'b', 'c']).copy()
write_feather(df, buf, version=version)
buf.seek(0)
result = read_feather(buf)
assert_frame_equal(result, df)
@pytest.mark.pandas
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
def test_sparse_dataframe(version):
if not pa.pandas_compat._pandas_api.has_sparse:
pytest.skip("version of pandas does not support SparseDataFrame")
# GH #221
data = {'A': [0, 1, 2],
'B': [1, 0, 1]}
df = pd.DataFrame(data).to_sparse(fill_value=1)
expected = df.to_dense()
_check_pandas_roundtrip(df, expected, version=version)
@pytest.mark.pandas
def test_duplicate_columns_pandas():
# https://github.com/wesm/feather/issues/53
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3),
columns=list('aaa')).copy()
_assert_error_on_write(df, ValueError)
def test_duplicate_columns():
# only works for version 2
table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'a', 'b'])
_check_arrow_roundtrip(table)
_assert_error_on_write(table, ValueError, version=1)
@pytest.mark.pandas
def test_unsupported():
# https://github.com/wesm/feather/issues/240
# serializing actual python objects
# custom python objects
class A:
pass
df = pd.DataFrame({'a': [A(), A()]})
_assert_error_on_write(df, ValueError)
# non-strings
df = pd.DataFrame({'a': ['a', 1, 2.0]})
_assert_error_on_write(df, TypeError)
@pytest.mark.pandas
def test_v2_set_chunksize():
df = pd.DataFrame({'A': np.arange(1000)})
table = pa.table(df)
buf = io.BytesIO()
write_feather(table, buf, chunksize=250, version=2)
result = buf.getvalue()
ipc_file = pa.ipc.open_file(pa.BufferReader(result))
assert ipc_file.num_record_batches == 4
assert len(ipc_file.get_batch(0)) == 250
@pytest.mark.pandas
def test_v2_compression_options():
df = pd.DataFrame({'A': np.arange(1000)})
cases = [
# compression, compression_level
('uncompressed', None),
('lz4', None),
('zstd', 1),
('zstd', 10)
]
for compression, compression_level in cases:
_check_pandas_roundtrip(df, compression=compression,
compression_level=compression_level)
buf = io.BytesIO()
# LZ4 doesn't support compression_level
with pytest.raises(pa.ArrowInvalid,
match="doesn't support setting a compression level"):
write_feather(df, buf, compression='lz4', compression_level=10)
# Trying to compress with V1
with pytest.raises(
ValueError,
match="Feather V1 files do not support compression option"):
write_feather(df, buf, compression='lz4', version=1)
# Trying to set chunksize with V1
with pytest.raises(
ValueError,
match="Feather V1 files do not support chunksize option"):
write_feather(df, buf, chunksize=4096, version=1)
# Unsupported compressor
with pytest.raises(ValueError,
match='compression="snappy" not supported'):
write_feather(df, buf, compression='snappy')
def test_v2_lz4_default_compression():
# ARROW-8750: Make sure that the compression=None option selects lz4 if
# it's available
if not pa.Codec.is_available('lz4_frame'):
pytest.skip("LZ4 compression support is not built in C++")
# some highly compressible data
t = pa.table([np.repeat(0, 100000)], names=['f0'])
buf = io.BytesIO()
write_feather(t, buf)
default_result = buf.getvalue()
buf = io.BytesIO()
write_feather(t, buf, compression='uncompressed')
uncompressed_result = buf.getvalue()
assert len(default_result) < len(uncompressed_result)
def test_v1_unsupported_types():
table = pa.table([pa.array([[1, 2, 3], [], None])], names=['f0'])
buf = io.BytesIO()
with pytest.raises(TypeError,
match=("Unsupported Feather V1 type: "
"list<item: int64>. "
"Use V2 format to serialize all Arrow types.")):
write_feather(table, buf, version=1)
@pytest.mark.slow
@pytest.mark.pandas
def test_large_dataframe(version):
df = pd.DataFrame({'A': np.arange(400000000)})
_check_pandas_roundtrip(df, version=version)
@pytest.mark.large_memory
@pytest.mark.pandas
def test_chunked_binary_error_message():
# ARROW-3058: As Feather does not yet support chunked columns, we at least
# make sure it's clear to the user what is going on
# 2^31 + 1 bytes
values = [b'x'] + [
b'x' * (1 << 20)
] * 2 * (1 << 10)
df = pd.DataFrame({'byte_col': values})
# Works fine with version 2
buf = io.BytesIO()
write_feather(df, buf, version=2)
result = read_feather(pa.BufferReader(buf.getvalue()))
assert_frame_equal(result, df)
with pytest.raises(ValueError, match="'byte_col' exceeds 2GB maximum "
"capacity of a Feather binary column. This restriction "
"may be lifted in the future"):
write_feather(df, io.BytesIO(), version=1)
def test_feather_without_pandas(tempdir, version):
# ARROW-8345
table = pa.table([pa.array([1, 2, 3])], names=['f0'])
path = str(tempdir / "data.feather")
_check_arrow_roundtrip(table, path)
@pytest.mark.pandas
def test_read_column_selection(version):
# ARROW-8641
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=['a', 'b', 'c'])
# select columns as string names or integer indices
_check_pandas_roundtrip(
df, columns=['a', 'c'], expected=df[['a', 'c']], version=version)
_check_pandas_roundtrip(
df, columns=[0, 2], expected=df[['a', 'c']], version=version)
# different order is followed
_check_pandas_roundtrip(
df, columns=['b', 'a'], expected=df[['b', 'a']], version=version)
_check_pandas_roundtrip(
df, columns=[1, 0], expected=df[['b', 'a']], version=version)
def test_read_column_duplicated_selection(tempdir, version):
# duplicated columns in the column selection
table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'b', 'c'])
path = str(tempdir / "data.feather")
write_feather(table, path, version=version)
expected = pa.table([[1, 2, 3], [4, 5, 6], [1, 2, 3]],
names=['a', 'b', 'a'])
for col_selection in [['a', 'b', 'a'], [0, 1, 0]]:
result = read_table(path, columns=col_selection)
assert result.equals(expected)
def test_read_column_duplicated_in_file(tempdir):
# duplicated columns in feather file (only works for feather v2)
table = pa.table([[1, 2, 3], [4, 5, 6], [7, 8, 9]], names=['a', 'b', 'a'])
path = str(tempdir / "data.feather")
write_feather(table, path, version=2)
# no selection works fine
result = read_table(path)
assert result.equals(table)
# selection with indices works
result = read_table(path, columns=[0, 2])
assert result.column_names == ['a', 'a']
# selection with column names errors
with pytest.raises(ValueError):
read_table(path, columns=['a', 'b'])
def test_nested_types(compression):
# https://issues.apache.org/jira/browse/ARROW-8860
table = pa.table({'col': pa.StructArray.from_arrays(
[[0, 1, 2], [1, 2, 3]], names=["f1", "f2"])})
_check_arrow_roundtrip(table, compression=compression)
table = pa.table({'col': pa.array([[1, 2], [3, 4]])})
_check_arrow_roundtrip(table, compression=compression)
table = pa.table({'col': pa.array([[[1, 2], [3, 4]], [[5, 6], None]])})
_check_arrow_roundtrip(table, compression=compression)
@h.given(past.all_tables, st.sampled_from(["uncompressed", "lz4", "zstd"]))
def test_roundtrip(table, compression):
_check_arrow_roundtrip(table, compression=compression)
def test_feather_v017_experimental_compression_backward_compatibility(datadir):
# ARROW-11163 - ensure newer pyarrow versions can read the old feather
# files from version 0.17.0 with experimental compression support (before
# it was officially added to IPC format in 1.0.0)
# file generated with:
# table = pa.table({'a': range(5)})
# from pyarrow import feather
# feather.write_feather(
# table, "v0.17.0.version=2-compression=lz4.feather",
# compression="lz4", version=2)
expected = pa.table({'a': range(5)})
result = read_table(datadir / "v0.17.0.version=2-compression=lz4.feather")
assert result.equals(expected)
| apache-2.0 |
jorge2703/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # unlike y[:, np.newaxis] which does not
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
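        # The Cython tree builder expects y as a C-contiguous float64 (DOUBLE)
        # array, so convert it here if needed.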
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
kc-lab/dms2dfe | dms2dfe/lib/plot_mut_data_scatter.py | 2 | 17844 | #!usr/bin/python
# Copyright 2016, Rohan Dandage <[email protected],[email protected]>
# This program is distributed under General Public License v. 3.
"""
================================
``plot_mut_data_scatter``
================================
"""
import sys
from os.path import splitext,exists,basename
from os import makedirs,stat
import pandas as pd
import numpy as np
import matplotlib
matplotlib.style.use('ggplot')
matplotlib.rcParams['axes.unicode_minus']=False
matplotlib.use('Agg') # no Xwindows
import matplotlib.pyplot as plt
# matplotlib.style.use('ggplot')
import logging
logging.basicConfig(format='[%(asctime)s] %(levelname)s\tfrom %(filename)s in %(funcName)s(..): %(message)s',level=logging.DEBUG) #
# from dms2dfe.lib.io_strs import make_pathable_string
from dms2dfe.lib.io_plots import saveplot,get_axlims
from dms2dfe.lib.io_dfs import set_index,denanrows
def gettopnlastdiff(data,col1,col2,zcol=None,rows=5,zcol_threshold=None,
col_classes=None,classes=[]):
"""
    Get the mutant ids with the most negative and most positive (col2 - col1) differences
:param data: pandas dataframe
:param col1: name of column1
:param col2: name of column2
"""
data.loc[:,'diff']=data.loc[:,col2]-data.loc[:,col1]
if zcol is None:
data_heads=data.sort_values(by='diff',ascending=True).head(rows)#.index
data_tails=data.sort_values(by='diff',ascending=True).tail(rows)#.index
else:
if not zcol_threshold is None:
data=data.loc[(data.loc[:,zcol]<zcol_threshold),:]
# df.sort_values(['a', 'b'], ascending=[True, False])
data_heads=data.sort_values(by=['diff',zcol],ascending=[True,True]).head(rows)#.index
data_tails=data.sort_values(by=['diff',zcol],ascending=[True,False]).tail(rows)#.index
if not col_classes is None:
data_heads=data_heads.loc[(data_heads.loc[:,col_classes]==classes[0]),:]
data_tails=data_tails.loc[(data_tails.loc[:,col_classes]==classes[1]),:]
return data_heads.index,data_tails.index,
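# Illustrative call (the column names are hypothetical):
#   heads, tails = gettopnlastdiff(data_fit, 'FiA_ctrl', 'FiA_sel', rows=5)
# `heads` holds the mutants with the most negative (col2 - col1) difference,
# `tails` those with the most positive difference; `zcol`/`zcol_threshold`
# optionally filter and order by a third column, and `col_classes`/`classes`
# restrict each set to a given class label.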
from dms2dfe.lib.plot_mut_data import data2mut_matrix,data2sub_matrix
# from dms2dfe.lib.io_mut_files import concat_cols
from dms2dfe.lib.io_plots import repel_labels
def plot_sc(data,ax,xcol,ycol,ylabel='',
heads=[],tails=[],repel=0.045,
annot_headtails=True,
color_sca=None,
color_dots='both',
color_heads='r',color_tails='b',
zcol=None,
zcol_threshold=None,
diagonal=True,
space=0.2,
axlims=None,
):
"""
Plot scatter
:param data: pandas dataframe
:param ax: axes object
:param xcol: column name of x data
:param ycol: column name of y data
"""
if (not zcol is None) and (sum(~pd.isnull(data.loc[:,zcol]))==0):
zcol=None
if zcol is None:
ax=data.plot.scatter(xcol,ycol,edgecolor='none',alpha=0.6,
c='yellowgreen',
ax=ax)
else:
data=data.sort_values(by=zcol,ascending=False)
ax.scatter(x=data.loc[:,xcol],
y=data.loc[:,ycol],
edgecolor='none',
alpha=0.6,
c=data.loc[:,zcol],
cmap='summer_r',
)
ax.set_xlabel(xcol)
if len(heads)==0 and len(tails)==0:
if annot_headtails:
heads,tails=gettopnlastdiff(data,ycol,xcol,zcol=zcol,zcol_threshold=zcol_threshold)
color_sca='none'
color_edge='royalblue'
if (color_dots=='heads') or (color_dots=='both'):
ax.scatter(x=data.loc[heads,xcol],y=data.loc[heads,ycol],
edgecolor=color_edge,
facecolors=color_sca,
)
try:
repel_labels(ax, data.loc[heads, xcol], data.loc[heads, ycol], heads, k=repel,label_color=color_heads)
except:
for s in heads:
ax.text(data.loc[s, xcol], data.loc[s, ycol], s,color=color_heads)
if (color_dots=='tails') or (color_dots=='both'):
ax.scatter(x=data.loc[tails,xcol],y=data.loc[tails,ycol],
edgecolor=color_edge,
facecolors=color_sca,
)
try:
repel_labels(ax, data.loc[tails, xcol], data.loc[tails, ycol], tails, k=repel,label_color=color_tails)
except:
for s in tails:
if s in data.index:
ax.text(data.loc[s, xcol], data.loc[s, ycol], s,color=color_tails)
ax.set_ylabel(ylabel)
if diagonal:
ax.plot([100,-100],[100,-100],linestyle='-',color='darkgray',zorder=0)
if axlims is None:
xlims,ylims=get_axlims(data.loc[:,xcol],data.loc[:,ycol],space=space)
ax.set_xlim(xlims)
ax.set_ylim(ylims)
axlims=[xlims,ylims]
else:
ax.set_xlim(axlims[0])
ax.set_ylim(axlims[1])
return ax,heads,tails,axlims
def mutids2refrei(mutid):
"""
Convert mutids to reference amino acid and index
:param mutid: mutation ID
"""
return mutid[:-1]
def mutids2subid(mutid):
"""
Convert mutids to substitution id
:param mutid: mutation ID
"""
return mutid[0]+mutid[-1]
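# Editorial example of the two helpers above, assuming mutation IDs of the form
# <wild-type aa><position><mutant aa>, e.g. 'A24G':
#   mutids2refrei('A24G') -> 'A24'  (reference residue and position)
#   mutids2subid('A24G')  -> 'AG'   (wild-type and mutant amino acids only)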
def plot_scatter_mutilayered(data_all,xcol,ycol,
mutids_heads=[],mutids_tails=[],
repel=0.045,
color_dots='both',
annot_headtails=True,
color_heads='r',color_tails='b',
note_text='',
stds_sub_pos=None,
col_z_mutations=None,
zcol_threshold=None,
errorbars=False,
diagonal=True,
space=0.2,
figsize=[8.5,6],
plot_fh=None,):
"""
Plot multi-layered scatter
:param data_all: pandas dataframe
:param xcol: column name of x data
:param ycol: column name of y data
"""
# print data_all.shape
if 'mutids' in data_all:
data_all=data_all.set_index('mutids')
data_all_mut=data_all.copy()
if not col_z_mutations is None:
data_all=data_all.drop(col_z_mutations,axis=1)
# sum(~pd.isnull(data_all_mut.loc[:,col_z_mutations]))
data_all_pos=pd.concat([data2mut_matrix(data_all.reset_index(),xcol,'mut','aas').mean(),
data2mut_matrix(data_all.reset_index(),ycol,'mut','aas').mean(),
data2mut_matrix(data_all.reset_index(),xcol,'mut','aas').std(),
data2mut_matrix(data_all.reset_index(),ycol,'mut','aas').std(),
],axis=1)
data_all_pos.columns=[xcol,ycol,xcol+'std',ycol+'std']
data_all_sub=pd.concat([data2sub_matrix(data_all,xcol,'mut','aas',aggfunc='mean').unstack(),
data2sub_matrix(data_all,ycol,'mut','aas',aggfunc='mean').unstack(),
data2sub_matrix(data_all,xcol,'mut','aas',aggfunc='std').unstack(),
data2sub_matrix(data_all,ycol,'mut','aas',aggfunc='std').unstack(),
],axis=1)
data_all_sub.columns=[xcol,ycol,xcol+'std',ycol+'std']
data_all_sub=denanrows(data_all_sub)
if not 'Wild type' in data_all_sub:
data_all_sub=data_all_sub.reset_index()
mutids=[]
for i in data_all_sub.index:
mutids.append('%s%s' % (data_all_sub.reset_index().loc[i,'Wild type'],
data_all_sub.reset_index().loc[i,'Mutation to']))
data_all_sub.loc[:,'mutids']=mutids
data_all_sub=data_all_sub.set_index('mutids')
fig=plt.figure(figsize=figsize)
ax1=plt.subplot(131)
ax2=plt.subplot(132)
ax3=plt.subplot(133)
if errorbars:
ax2.errorbar(data_all_sub.loc[:,xcol],data_all_sub.loc[:,ycol],
xerr=data_all_sub.loc[:,xcol+'std'],
yerr=data_all_sub.loc[:,ycol+'std'],
fmt="none",ecolor='gray',alpha=0.15,
capthick=0,
zorder=0)
ax3.errorbar(data_all_pos.loc[:,xcol],data_all_pos.loc[:,ycol],
xerr=data_all_pos.loc[:,xcol+'std'],
yerr=data_all_pos.loc[:,ycol+'std'],
fmt="none",ecolor='gray',alpha=0.15,
capthick=0,
zorder=0)
ax1,mutids_heads,mutids_tails,axlims=plot_sc(data_all_mut,ax1,xcol,ycol,ylabel=ycol,
heads=mutids_heads,tails=mutids_tails,
annot_headtails=False,
zcol=col_z_mutations,zcol_threshold=zcol_threshold,
repel=repel,
color_dots=color_dots,color_heads=color_heads,color_tails=color_tails,diagonal=diagonal,space=space,)
repel_sub=repel*len(data_all_sub)/(len(data_all_mut))*5
if repel_sub>repel:
repel_sub=repel
# print repel_sub
# print data_all_sub.columns
# data_all_sub=denanrows(data_all_sub)
data_all_sub=data_all_sub.loc[denanrows(data_all_sub.loc[:,[xcol,ycol]]).index.tolist(),:]
# print data_all_sub.shape
ax2,_,_,_=plot_sc(data_all_sub,ax2,xcol,ycol,
heads=[mutids2subid(i) for i in mutids_heads],tails=[mutids2subid(i) for i in mutids_tails],
annot_headtails=False,
repel=repel_sub,
color_dots=color_dots,color_heads=color_heads,color_tails=color_tails,diagonal=diagonal,space=space,
# axlims=axlims
)
repel_pos=repel*len(data_all_pos)/(len(data_all_mut))*12.5
if repel_pos>repel:
repel_pos=repel
# print repel_pos
ax3,_,_,_=plot_sc(data_all_pos,ax3,xcol,ycol,
heads=[mutids2refrei(i) for i in mutids_heads],tails=[mutids2refrei(i) for i in mutids_tails],
annot_headtails=False,
repel=repel_pos,
color_dots=color_dots,color_heads=color_heads,color_tails=color_tails,diagonal=diagonal,space=space,
# axlims=axlims
)
ax1.set_title('Mutations',color="gray")
ax2.set_title('Substitutions',color="gray")
ax3.set_title('Positions',color="gray")
fig.suptitle(note_text, fontsize=15,color="k")
saveplot(plot_fh,form='both',transparent=False)
return data_all_mut,data_all_sub,data_all_pos
def set_title_higher(axes,labels,height=1.2,color='k'):
"""
Set title higher
:param axes: list of axes object
:param labels: list of labels for titles
"""
for i in range(len(labels)):
ax=axes[i]
label=labels[i]
x=ax.get_xlim()[0]
y=ax.get_ylim()[0]+(ax.get_ylim()[1]-ax.get_ylim()[0])*height
ax.text(x,y,label,
color=color,fontsize=15)
def data_comparison2scatter_mutilayered(data,data_label,color_dots=None,
mutids_heads=[],mutids_tails=[],
col_filter=None,
note_text=None,
col_pvals=None,
repel=0.045,
figsize=[15,5],
plot_fh=None):
"""
    Wrapper to plot a multi-layered scatter plot
:param data: pandas dataframe
:param data_label: label of the data
"""
from dms2dfe.lib.io_strs import splitlabel
# print data.shape
data=set_index(data,'mutids')
labels=splitlabel(data_label,splitby=' versus ',ctrl='$37^{0}$C')
if not note_text is None:
labels=["%s (%s)" % (l,note_text) for l in labels]
data.loc[:,labels[0]]=data.loc[:,'Fi_test']
data.loc[:,labels[1]]=data.loc[:,'Fi_ctrl']
if not col_pvals is None:
data.loc[:,col_pvals]=np.log10(data.loc[:,col_pvals])
if not data.index.name=='mutids':
data.index.name='mutids'
# print data.index
zcol_threshold=np.log10(0.01)
if not col_filter is None:
data.loc[data.loc[:,col_filter],labels]
cols=['mut','ref']+labels
if not col_pvals is None:
cols=cols+[col_pvals]
data=denanrows(data.loc[:,cols])
# print data.shape
# print data.index.name
# print data.columns.tolist()
plot_scatter_mutilayered(data,labels[1],labels[0],
plot_fh=plot_fh,
color_dots=color_dots,
mutids_heads=mutids_heads,
mutids_tails=mutids_tails,
color_heads='b',color_tails='b',
col_z_mutations=col_pvals,
zcol_threshold=0.05,
repel=repel,
figsize=figsize,#[6.375,4.5],
)
def plot_mulitilayered_scatter_per_class_comparison(prj_dh,
data_fns,data_labels,
filter_selection=None,
data_sections_pvals=None,
fns2sides=None,
filter_sections=None,
filter_signi=True,
col_pvals=None,
col_filter=None,
figsize=[9,3],
force=False):
"""
    Wrapper to plot multi-layered scatter plots from data_comparison
:param prj_dh: path to the project directory
:param data_fns: list of filenames
:param data_labels: list of corresponding labels
"""
plot_type='scatter_mutilayered_per_class_comparison'
dtype='data_comparison'
data_mutants_select=pd.DataFrame()
for i in range(len(data_labels)):
data_fn=data_fns[i]
data_label=data_labels[i]
data_fh='%s/data_comparison/aas/%s' % (prj_dh,data_fn)
data_comparison=pd.read_csv(data_fh).set_index('mutids')
data_plot=data_comparison.copy()
print len(denanrows(data_plot.loc[:,'class_comparison']))
if (filter_selection=='by_side'):
selection=fns2sides[data_fn]
else:# (filter_selection=='signi'):
selection=data_sections_pvals.loc[data_label,'selection']
# pval=data_sections_pvals.loc[data_label,'All']
if selection=='positive':
color_dots='heads'
elif selection=='negative':
color_dots='tails'
print color_dots
if ((filter_selection=='signi') or (filter_selection=='by_side')):
data_comparison=data_comparison.loc[(data_comparison.loc[:,'class_comparison']==selection),:]
else:
data_comparison=data_comparison.loc[((data_comparison.loc[:,'class_comparison']=='positive')\
| (data_comparison.loc[:,'class_comparison']=='negative')),:]
# data_plot=data_comparison.copy()
if filter_sections==True:
data_comparison=data_comparison.loc[~pd.isnull(data_comparison.loc[:,'sectionn']),:]
sectionn='True'
elif filter_sections=='signi':
sectionn=data_sections_pvals.loc[data_label,'Significant section']
data_comparison=data_comparison.loc[(data_comparison.loc[:,'sectionn']==sectionn),:]
# get intersect of (mutids significant section) and (class of selection)
else:
sectionn='all'
if filter_signi:
data_comparison.loc[pd.isnull(data_comparison.loc[:,'Significant']),'Significant']=False
data_comparison=data_comparison.loc[data_comparison.loc[:,'Significant'],:]
# by lowest of multiplication of pvals (only empiric)
zcol='z'
xcol='FiA_ctrl'
ycol='FiA_test'
data_comparison.loc[:,zcol]=data_comparison.loc[:,'padj_test']*data_comparison.loc[:,'padj_ctrl']
# get top 5
# data_comparison.to_csv('test.csv')
mutids_heads,mutids_tails=gettopnlastdiff(data_comparison,ycol,xcol,
# zcol=zcol
col_classes='class_comparison',
classes=['positive','negative']
)
data_comparison.loc[:,'data_label']=data_label
data_comparison.loc[:,'data_fn']=data_fn
data_mutants_select=data_mutants_select.append(data_comparison.loc[mutids_heads+mutids_tails,:])
plot_fh='%s/plots/aas/fig_%s_section_%s_%s.pdf' % (prj_dh,plot_type,sectionn.replace(',','_'),data_fn)
print plot_fh
print mutids_heads
print mutids_tails
if not exists(plot_fh) or force:
note_text=None
data_comparison2scatter_mutilayered(data_plot,data_label,
color_dots,note_text=note_text,
plot_fh=plot_fh,
mutids_heads=mutids_heads,
mutids_tails=mutids_tails,
col_pvals=col_pvals,
repel=0.08,
figsize=figsize,
)
return data_mutants_select | gpl-3.0 |
ankurankan/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; neither does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
arekolek/MaxIST | map.py | 1 | 2115 | from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
from xml.etree import ElementTree as ET
from itertools import chain
import graph_tool.all as gt
def vertices(f):
root = ET.parse(f)
ns = {'s': 'http://sndlib.zib.de/network'}
for e in root.findall('*/*/s:node', ns):
yield (float(e.find('*/s:x', ns).text), float(e.find('*/s:y', ns).text))
def edges(f):
root = ET.parse(f)
ns = {'s': 'http://sndlib.zib.de/network'}
pos = {e.get('id'):(float(e.find('*/s:x', ns).text), float(e.find('*/s:y', ns).text)) for e in root.findall('*/*/s:node', ns)}
for e in root.findall('*/*/s:link', ns):
yield chain(pos[e.find('s:source', ns).text], pos[e.find('s:target', ns).text])
if __name__ == '__main__':
from sys import argv
from re import findall
for f in argv[1:]:
vs = np.array(list(vertices(f)))
xmin, ymin = vs.min(axis=0)
xmax, ymax = vs.max(axis=0)
#x, y = vs.mean(axis=0)
x, y = (xmin+xmax)/2, (ymin+ymax)/2
m = Basemap(projection='stere', lon_0=x, lat_0=y, width=1000, height=1000)
xlo, ylo = m(xmin, ymin)
xhi, yhi = m(xmax, ymax)
span = max(xhi-xlo, yhi-ylo) * 1.15
#xmin, xmax = xmin-(xmax-xmin)/10, xmax+(xmax-xmin)/10
#ymin, ymax = ymin-(ymax-ymin)/5, ymax+(ymax-ymin)/5
# create new figure, axes instances.
fig = plt.figure(frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
# setup mercator map projection.
m = Basemap(#llcrnrlon=xmin,llcrnrlat=ymin,urcrnrlon=xmax,urcrnrlat=ymax,\
#rsphere=(6378137.00,6356752.3142),\
resolution='l', projection='stere',\
lon_0=x, lat_0=y, width=span, height=span)
#m.drawcountries(linestyle='dotted')
m.fillcontinents(color='#dddddd')
for e in edges(f):
m.drawgreatcircle(*e,linewidth=0.2,color='black')
m.scatter(vs[:,0], vs[:,1], latlon=True, s=4, c='black', alpha=1, zorder=10)
name = findall('[^/.]+', f)[-2]
fig.set_size_inches(1.042, 1.042)
fig.savefig('output/{}.pdf'.format(name), dpi=100)
| mit |
PYPIT/PYPIT | pypeit/debugger.py | 1 | 3461 | """
Module to setup the PypeIt debugger
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
import matplotlib.pyplot as plt
import numpy as np
# These need to be outside of the def's
try:
from pypeit.ginga import show_image
except ImportError: # Ginga is not yet required
pass
else:
from pypeit.ginga import clear_canvas
# Moved to the top and changed to only import set_trace
from pdb import set_trace
# ADD-ONs from xastropy
def plot1d(*args, **kwargs):
""" Plot 1d arrays
Parameters
----------
outfil= : string
Outfil
xlbl,ylbl= : string
Labels for x,y axes
xrng= list
Range of x limits
yrng= list
Range of y limits
xtwo= : ndarray
x-values for a second array
ytwo= : ndarray
y-values for a second array
mtwo= : str
marker for xtwo
scatter= : Bool
True for a scatter plot
NOTE: Any extra parameters are fed as kwargs to plt.plot()
"""
# Error checking
if len(args) == 0:
print('x_guis.simple_splot: No arguments!')
return
if not isinstance(args[0], np.ndarray):
print('x_guis: Input array is not a numpy.ndarray!')
return
plt_dict = {}
# Outfil
if ('outfil' in kwargs):
plt_dict['outfil'] = kwargs['outfil']
kwargs.pop('outfil')
else:
plt_dict['outfil'] = None
# Scatter plot?
if ('scatter' in kwargs):
kwargs.pop('scatter')
plt_dict['flg_scatt'] = 1
else:
plt_dict['flg_scatt'] = 0
# Second array?
if ('xtwo' in kwargs) & ('ytwo' in kwargs):
plt_dict['xtwo'] = kwargs['xtwo']
kwargs.pop('xtwo')
plt_dict['ytwo'] = kwargs['ytwo']
kwargs.pop('ytwo')
plt_dict['flg_two'] = 1
# mtwo
if 'mtwo' in kwargs:
plt_dict['mtwo']=kwargs['mtwo']
kwargs.pop('mtwo')
else:
plt_dict['mtwo']=''
else:
plt_dict['flg_two'] = 0
# Limits
for irng in ['xrng','yrng']:
try:
plt_dict[irng] = kwargs[irng]
except KeyError:
plt_dict[irng] = None
else:
kwargs.pop(irng)
# Labels
for ilbl in ['xlbl','ylbl']:
try:
plt_dict[ilbl] = kwargs[ilbl]
except KeyError:
plt_dict[ilbl] = None
else:
kwargs.pop(ilbl)
# Clear
plt.clf()
# Plot it right up
if len(args) == 1:
plt.plot(args[0].flatten(), **kwargs)
else:
for kk in range(1,len(args)):
if plt_dict['flg_scatt'] == 0:
plt.plot(args[0].flatten(),args[kk].flatten(), **kwargs)
else:
plt.scatter(args[0].flatten(),args[kk].flatten(), marker='o', **kwargs)
if plt_dict['flg_two'] == 1:
plt.plot(plt_dict['xtwo'], plt_dict['ytwo'], plt_dict['mtwo'], color='red', **kwargs)
# Limits
if plt_dict['xrng'] is not None:
plt.xlim(plt_dict['xrng'])
if plt_dict['yrng'] is not None:
plt.ylim(plt_dict['yrng'])
# Label
if plt_dict['xlbl'] is not None:
plt.xlabel(plt_dict['xlbl'])
if plt_dict['ylbl'] is not None:
plt.ylabel(plt_dict['ylbl'])
# Output?
if plt_dict['outfil'] is not None:
plt.savefig(plt_dict['outfil'])
print('Wrote figure to {:s}'.format(plt_dict['outfil']))
else: # Show
plt.show()
return
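# Editorial usage sketch, not part of the original module: it exercises the keyword
# handling documented in plot1d() above with synthetic arrays and writes a PNG
# (the file name is arbitrary).
if __name__ == '__main__':
    x = np.linspace(0., 10., 100)
    y = np.sin(x)
    # Overlay a coarser, offset copy through xtwo/ytwo and save instead of showing.
    plot1d(x, y, xtwo=x[::10], ytwo=np.sin(x[::10]) + 0.1, mtwo='o',
           xlbl='x', ylbl='sin(x)', xrng=[0., 10.], outfil='plot1d_demo.png')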
| gpl-3.0 |
shenzebang/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
Enchufa2/video-tester | VideoTester/gui.py | 1 | 23890 | # coding=UTF8
## This file is part of VideoTester
## See https://github.com/Enchufa2/video-tester for more information
## Copyright 2011-2016 Iñaki Úcar <[email protected]>
## This program is published under a GPLv3 license
import wx, wx.aui, pickle, textwrap, logging
import matplotlib as mpl
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx as Toolbar
from gi.repository import Gst, GstVideo, GObject
from . import __version__, VTLOG, VTClient, netifaces, \
supported_codecs, supported_protocols
from .resources import getVTIcon, getVTBitmap
class FuncLog(logging.Handler):
'''
A logging handler that sends logs to an update function.
'''
def __init__(self, textctrl):
logging.Handler.__init__(self)
self.textctrl = textctrl
def emit(self, record):
self.textctrl.SetInsertionPointEnd()
self.textctrl.WriteText(self.format(record) + '\n')
class VTframe(wx.Frame):
'''
Main window.
'''
def __init__(self, conf):
self.main = VTClient(conf)
wx.Frame.__init__(self, None)
self.SetIcon(getVTIcon())
# Menu Bar
self.vtmenubar = wx.MenuBar()
menu = wx.Menu()
self.m_files = menu.Append(wx.ID_OPEN, '&Open files...', 'Select Pickle files to plot')
menu.AppendSeparator()
self.m_exit = menu.Append(wx.ID_EXIT, 'E&xit', 'Exit program')
self.vtmenubar.Append(menu, '&File')
menu = wx.Menu()
self.m_run = menu.Append(wx.ID_REFRESH, '&Run...', 'Run test')
self.vtmenubar.Append(menu, 'R&un')
menu = wx.Menu()
self.m_about = menu.Append(wx.ID_ABOUT, '&About', 'About this program')
self.vtmenubar.Append(menu, '&Help')
self.SetMenuBar(self.vtmenubar)
self.vtstatusbar = self.CreateStatusBar(1, 0)
self.tabs = wx.Notebook(self, -1, style=0)
self.conf_tab = wx.Panel(self.tabs, -1)
self.video_label = wx.StaticText(self.conf_tab, -1, 'Choose a video:')
self.video = wx.Choice(self.conf_tab, -1, choices=[x[0] for x in self.main.videos])
self.codec_label = wx.StaticText(self.conf_tab, -1, 'Choose a codec:')
self.codec = wx.Choice(self.conf_tab, -1, choices=supported_codecs.keys())
self.bitrate_label = wx.StaticText(self.conf_tab, -1, 'Select the bitrate:')
self.bitrate = wx.Slider(self.conf_tab, -1, self.main.conf['bitrate'], 64, 1024, style=wx.SL_HORIZONTAL | wx.SL_LABELS)
self.framerate_label = wx.StaticText(self.conf_tab, -1, 'Select the framerate:')
self.framerate = wx.Slider(self.conf_tab, -1, self.main.conf['framerate'], 1, 100, style=wx.SL_HORIZONTAL | wx.SL_LABELS)
self.sb_video = wx.StaticBox(self.conf_tab, -1, 'Video options:')
self.iface_label = wx.StaticText(self.conf_tab, -1, 'Interface:')
self.iface = wx.Choice(self.conf_tab, -1, choices=netifaces)
self.ip_label = wx.StaticText(self.conf_tab, -1, 'Server IP:')
self.ip = wx.TextCtrl(self.conf_tab, -1, self.main.conf['ip'])
self.port_label = wx.StaticText(self.conf_tab, -1, 'Server port:')
self.port = wx.TextCtrl(self.conf_tab, -1, str(self.main.port))
self.protocol = wx.RadioBox(self.conf_tab, -1, 'Protocol:', choices=supported_protocols, majorDimension=3, style=wx.RA_SPECIFY_COLS)
self.sb_net = wx.StaticBox(self.conf_tab, -1, 'Net options:')
self.qos = []
self.qos.append(('latency', wx.CheckBox(self.conf_tab, -1, 'Latency')))
self.qos.append(('delta', wx.CheckBox(self.conf_tab, -1, 'Delta')))
self.qos.append(('jitter', wx.CheckBox(self.conf_tab, -1, 'Jitter')))
self.qos.append(('skew', wx.CheckBox(self.conf_tab, -1, 'Skew')))
self.qos.append(('bandwidth', wx.CheckBox(self.conf_tab, -1, 'Bandwidth')))
self.qos.append(('plr', wx.CheckBox(self.conf_tab, -1, 'Packet Loss Rate')))
self.qos.append(('pld', wx.CheckBox(self.conf_tab, -1, 'Packet Loss Distribution')))
self.sb_qos = wx.StaticBox(self.conf_tab, -1, 'QoS measures:')
self.bs = []
self.bs.append(('streameye', wx.CheckBox(self.conf_tab, -1, 'Stream Eye')))
self.bs.append(('refstreameye', wx.CheckBox(self.conf_tab, -1, 'refStream Eye')))
self.bs.append(('gop', wx.CheckBox(self.conf_tab, -1, 'GOP size')))
self.bs.append(('iflr', wx.CheckBox(self.conf_tab, -1, 'I Frame Loss Rate')))
self.sb_bs = wx.StaticBox(self.conf_tab, -1, 'BitStream measures:')
self.vq = []
self.vq.append(('psnr', wx.CheckBox(self.conf_tab, -1, 'PSNR')))
self.vq.append(('ssim', wx.CheckBox(self.conf_tab, -1, 'SSIM')))
self.vq.append(('g1070', wx.CheckBox(self.conf_tab, -1, 'G.1070')))
self.vq.append(('psnrtomos', wx.CheckBox(self.conf_tab, -1, 'PSNRtoMOS')))
self.vq.append(('miv', wx.CheckBox(self.conf_tab, -1, 'MIV')))
self.sb_vq = wx.StaticBox(self.conf_tab, -1, 'Video quality measures:')
self.log_tab = wx.Panel(self.tabs, -1)
self.log = wx.TextCtrl(self.log_tab, -1, '', style=wx.TE_MULTILINE | wx.TE_READONLY)
self.results_tab = PlotNotebook(self.tabs)
self.video_tab = wx.Panel(self.tabs, -1)
self.player = wx.Panel(self.video_tab, -1)
self.player_button = wx.Button(self.video_tab, -1, 'Play', name='playvideo', size=(200, 50))
self.__setProperties()
self.__doLayout()
self.__initVideo()
self.Bind(wx.EVT_MENU, self.onOpen, self.m_files)
self.Bind(wx.EVT_MENU, self.onExit, self.m_exit)
self.Bind(wx.EVT_MENU, self.onRun, self.m_run)
self.Bind(wx.EVT_MENU, self.onAbout, self.m_about)
self.Bind(wx.EVT_CLOSE, self.onCloseWindow)
self.player_button.Bind(wx.EVT_BUTTON, self.onPlay)
# Logging
console = VTLOG.handlers[0]
self.hdlr = FuncLog(self.log)
self.hdlr.setLevel(console.level)
console.setLevel(40)
self.hdlr.setFormatter(console.formatter)
VTLOG.addHandler(self.hdlr)
def __setProperties(self):
self.SetTitle('Video Tester')
self.SetSize((800, 600))
self.Hide()
self.vtstatusbar.SetStatusWidths([-1])
vtstatusbar_fields = ['VT Client']
for i in range(len(vtstatusbar_fields)):
self.vtstatusbar.SetStatusText(vtstatusbar_fields[i], i)
self.video_label.SetMinSize((160, 17))
self.video.SetMinSize((120, 25))
self.video.SetSelection(zip(*self.main.videos)[0].index(self.main.conf['video']))
self.codec_label.SetMinSize((160, 17))
self.codec.SetMinSize((120, 25))
self.codec.SetSelection(supported_codecs.keys().index(self.main.conf['codec']))
self.bitrate_label.SetMinSize((160, 17))
self.bitrate.SetMinSize((210, 50))
self.framerate_label.SetMinSize((160, 17))
self.framerate.SetMinSize((210, 50))
self.iface_label.SetMinSize((140, 17))
self.iface.SetMinSize((80, 25))
self.iface.SetSelection(netifaces.index(self.main.conf['iface']))
self.ip_label.SetMinSize((140, 17))
self.ip.SetMinSize((150, 25))
self.port_label.SetMinSize((140, 17))
self.protocol.SetSelection(supported_protocols.index(self.main.conf['protocol']))
for name, el in self.qos + self.bs + self.vq:
if name in self.main.conf['qos'] + self.main.conf['bs'] + self.main.conf['vq']:
el.SetValue(True)
self.results_tab.Hide()
self.video_tab.Hide()
def __doLayout(self):
sizer_body = wx.BoxSizer(wx.VERTICAL)
sizer_log_tab = wx.BoxSizer(wx.HORIZONTAL)
sizer_video_tab = wx.BoxSizer(wx.VERTICAL)
sizer_conf_tab = wx.GridSizer(2, 1, 3, 3)
sizer_conf_up = wx.GridSizer(1, 2, 0, 0)
sizer_conf_down = wx.GridSizer(1, 3, 0, 0)
sizer_conf_tab.Add(sizer_conf_up, 1, wx.EXPAND, 0)
sizer_conf_tab.Add(sizer_conf_down, 1, wx.EXPAND, 0)
sizer_video = wx.GridSizer(4, 1, 0, 0)
sizer_net = wx.GridSizer(4, 1, 0, 0)
sizer_qos = wx.BoxSizer(wx.VERTICAL)
sizer_bs = wx.BoxSizer(wx.VERTICAL)
sizer_vq = wx.BoxSizer(wx.VERTICAL)
self.sb_video.Lower()
sizer_sb_video = wx.StaticBoxSizer(self.sb_video, wx.HORIZONTAL)
sizer_sb_video.Add(sizer_video, 1, wx.EXPAND | wx.ALL, 10)
self.sb_net.Lower()
sizer_sb_net = wx.StaticBoxSizer(self.sb_net, wx.HORIZONTAL)
sizer_sb_net.Add(sizer_net, 1, wx.EXPAND | wx.ALL, 10)
self.sb_qos.Lower()
sizer_sb_qos = wx.StaticBoxSizer(self.sb_qos, wx.HORIZONTAL)
sizer_sb_qos.Add(sizer_qos, 1, wx.EXPAND | wx.ALL, 10)
self.sb_bs.Lower()
sizer_sb_bs = wx.StaticBoxSizer(self.sb_bs, wx.HORIZONTAL)
sizer_sb_bs.Add(sizer_bs, 1, wx.EXPAND | wx.ALL, 10)
self.sb_vq.Lower()
sizer_sb_vq = wx.StaticBoxSizer(self.sb_vq, wx.HORIZONTAL)
sizer_sb_vq.Add(sizer_vq, 1, wx.EXPAND | wx.ALL, 10)
sizer_videobox = wx.BoxSizer(wx.HORIZONTAL)
sizer_videobox.Add(self.video_label, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_videobox.Add(self.video, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_codec = wx.BoxSizer(wx.HORIZONTAL)
sizer_codec.Add(self.codec_label, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_codec.Add(self.codec, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_bitrate = wx.BoxSizer(wx.HORIZONTAL)
sizer_bitrate.Add(self.bitrate_label, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_bitrate.Add(self.bitrate, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_framerate = wx.BoxSizer(wx.HORIZONTAL)
sizer_framerate.Add(self.framerate_label, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_framerate.Add(self.framerate, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_video.Add(sizer_videobox, 1, wx.EXPAND, 0)
sizer_video.Add(sizer_codec, 1, wx.EXPAND, 0)
sizer_video.Add(sizer_bitrate, 1, wx.EXPAND, 0)
sizer_video.Add(sizer_framerate, 1, wx.EXPAND, 0)
sizer_iface = wx.BoxSizer(wx.HORIZONTAL)
sizer_iface.Add(self.iface_label, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_iface.Add(self.iface, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_ip = wx.BoxSizer(wx.HORIZONTAL)
sizer_ip.Add(self.ip_label, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_ip.Add(self.ip, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_port = wx.BoxSizer(wx.HORIZONTAL)
sizer_port.Add(self.port_label, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_port.Add(self.port, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_net.Add(sizer_iface, 1, wx.EXPAND, 0)
sizer_net.Add(sizer_ip, 1, wx.EXPAND, 0)
sizer_net.Add(sizer_port, 1, wx.EXPAND, 0)
sizer_net.Add(self.protocol, 0, wx.EXPAND, 0)
for name, el in self.qos:
sizer_qos.Add(el, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
for name, el in self.bs:
sizer_bs.Add(el, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
for name, el in self.vq:
sizer_vq.Add(el, 0, wx.ALIGN_CENTER_VERTICAL | wx.ADJUST_MINSIZE, 0)
sizer_conf_up.Add(sizer_sb_video, 1, wx.EXPAND | wx.ALL^wx.BOTTOM, 10)
sizer_conf_up.Add(sizer_sb_net, 1, wx.EXPAND | wx.ALL^wx.BOTTOM, 10)
sizer_conf_down.Add(sizer_sb_qos, 1, wx.EXPAND | wx.ALL, 10)
sizer_conf_down.Add(sizer_sb_bs, 1, wx.EXPAND | wx.ALL, 10)
sizer_conf_down.Add(sizer_sb_vq, 1, wx.EXPAND | wx.ALL, 10)
self.conf_tab.SetSizer(sizer_conf_tab)
sizer_log_tab.Add(self.log, 1, wx.EXPAND | wx.ADJUST_MINSIZE, 0)
self.log_tab.SetSizer(sizer_log_tab)
sizer_video_tab.Add(self.player, 1, wx.EXPAND, 0)
sizer_video_tab.Add(self.player_button, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 30)
self.video_tab.SetSizer(sizer_video_tab)
self.video_tab.SetBackgroundColour((0, 0, 0))
self.tabs.AddPage(self.conf_tab, 'Configuration')
self.tabs.AddPage(self.log_tab, 'Log')
self.tabs.AddPage(self.results_tab, 'Results')
self.tabs.AddPage(self.video_tab, 'Video')
sizer_body.Add(self.tabs, 1, wx.EXPAND, 0)
self.SetSizer(sizer_body)
self.Layout()
self.Centre()
def __initVideo(self):
self.pipeline = Gst.parse_launch(
'filesrc name=video1 filesrc name=video2 filesrc name=video3 \
videomixer name=mix ! xvimagesink \
video1. \
! queue ! videoparse framerate=%s/1 name=parser1 \
! textoverlay font-desc="Sans 24" text="Original" \
valignment=top halignment=left shaded-background=true \
! videoscale \
! mix.sink_1 \
video2. \
! queue ! videoparse framerate=%s/1 name=parser2 \
! textoverlay font-desc="Sans 24" text="Coded" \
valignment=top halignment=left shaded-background=true \
! videoscale \
! mix.sink_2 \
video3. \
! queue ! videoparse framerate=%s/1 name=parser3 \
! textoverlay font-desc="Sans 24" text="Received" \
valignment=top halignment=left shaded-background=true \
! videoscale \
! mix.sink_3' % (
self.main.conf['framerate'],
self.main.conf['framerate'],
self.main.conf['framerate']
))
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.enable_sync_message_emission()
bus.connect('message', self.onMessage)
bus.connect('sync-message::element', self.onSyncMessage)
def onExit(self, event):
self.Close(True)
def onCloseWindow(self, event):
'''
Show a dialog to verify exit.
'''
# dialog to verify exit (including menuExit)
dlg = wx.MessageDialog(self, 'Do you want to exit?', 'Exit', wx.YES_NO | wx.ICON_QUESTION)
result = dlg.ShowModal()
dlg.Destroy()
if result == wx.ID_YES:
try:
self.pipeline.set_state(Gst.State.NULL)
except:
pass
VTLOG.removeHandler(self.hdlr)
self.Destroy() # frame
def onAbout(self, event):
'''
Show *About* dialog.
'''
license = textwrap.dedent('''\
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.''')
info = wx.AboutDialogInfo()
info.SetIcon(getVTIcon())
info.SetName('Video Tester')
info.SetVersion('version ' + __version__)
info.SetDescription('Video Quality Assessment Tool')
info.SetCopyright('(C) 2011-2016 Iñaki Úcar')
info.SetWebSite('https://github.com/Enchufa2/video-tester')
info.SetLicense(license)
info.AddDeveloper('Iñaki Úcar <[email protected]>')
info.AddDocWriter('Iñaki Úcar <[email protected]>')
info.AddArtist('Almudena M. Castro <[email protected]>')
wx.AboutBox(info)
def onOpen(self, event):
'''
Show *Open files* dialog.
'''
self.video_tab.Hide()
wildcard = u'Pickle files (*.pkl)|*.pkl'
dlg = wx.FileDialog(self, u'Open files', '', '', wildcard, wx.FD_MULTIPLE)
if dlg.ShowModal() == wx.ID_OK:
results = []
for filename in dlg.GetFilenames():
f = open(dlg.GetDirectory() + '/' + filename, 'rb')
results.append(pickle.load(f))
f.close()
dlg.Destroy()
self.__setResults(results)
self.tabs.SetSelection(2)
def onRun(self, event):
'''
Run VT Client.
'''
self.conf_tab.Disable()
self.vtmenubar.Disable()
self.results_tab.Hide()
self.video_tab.Hide()
self.tabs.SetSelection(1)
self.vtstatusbar.SetStatusText('Running...')
self.__setValues()
ret = self.main.run()
if ret:
self.paths, self.caps, results = ret
self.__setResults(results)
self.video_tab.Show()
self.conf_tab.Enable()
wx.Window.Enable(self.vtmenubar)
self.vtstatusbar.SetStatusText('Stopped')
def onPlay(self, event):
'''
Play video files.
'''
if self.player_button.GetLabel() == 'Play':
self.player_button.SetLabel('Stop')
video1 = self.pipeline.get_by_name('video1')
video2 = self.pipeline.get_by_name('video2')
video3 = self.pipeline.get_by_name('video3')
video1.props.location = self.paths['original'][1]
video2.props.location = self.paths['coded'][1]
video3.props.location = self.paths['received'][1]
parser1 = self.pipeline.get_by_name('parser1')
parser2 = self.pipeline.get_by_name('parser2')
parser3 = self.pipeline.get_by_name('parser3')
mix = self.pipeline.get_by_name('mix')
sink_2 = mix.get_child_by_name('sink_2')
sink_3 = mix.get_child_by_name('sink_3')
sink_2.props.xpos = self.caps['width'] * 2
sink_3.props.xpos = self.caps['width']
parser1.props.width = self.caps['width']
parser1.props.height = self.caps['height']
parser2.props.width = self.caps['width']
parser2.props.height = self.caps['height']
parser3.props.width = self.caps['width']
parser3.props.height = self.caps['height']
self.pipeline.set_state(Gst.State.PLAYING)
else:
self.player_button.SetLabel('Play')
self.pipeline.set_state(Gst.State.NULL)
def onSyncMessage(self, bus, message):
if GstVideo.is_video_overlay_prepare_window_handle_message(message):
message.src.set_property('force-aspect-ratio', True)
message.src.set_window_handle(self.video_tab.GetHandle())
def onMessage(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS or t == Gst.MessageType.ERROR:
self.pipeline.set_state(Gst.State.NULL)
self.player_button.SetLabel('Play')
def __setValues(self):
'''
Set configuration options.
'''
self.main.conf['bitrate'] = int(self.bitrate.GetValue())
self.main.conf['framerate'] = int(self.framerate.GetValue())
self.main.conf['video'] = str(self.video.GetStringSelection())
self.main.conf['codec'] = str(self.codec.GetStringSelection())
self.main.conf['iface'] = str(self.iface.GetStringSelection())
self.main.conf['ip'] = str(self.ip.GetValue())
self.main.port = int(self.port.GetValue())
self.main.conf['protocol'] = str(self.protocol.GetStringSelection())
qos = []
for name, el in self.qos:
if el.GetValue():
qos.append(name)
self.main.conf['qos'] = qos
bs = []
for name, el in self.bs:
if el.GetValue():
bs.append(name)
self.main.conf['bs'] = bs
vq = []
for name, el in self.vq:
if el.GetValue():
vq.append(name)
self.main.conf['vq'] = vq
def __setResults(self, results):
'''
Plot measures and show *Results* tab.
'''
self.results_tab.removePages()
for measure in results:
axes = self.results_tab.add(measure['name']).gca()
if measure['type'] == 'plot':
axes.plot(measure['axes'][0], measure['axes'][1], 'b')
axes.plot(measure['axes'][0], [measure['mean'] for i in measure['axes'][0]], 'g')
axes.plot(measure['axes'][0], [measure['max'][1] for i in measure['axes'][0]], 'r')
axes.plot(measure['axes'][0], [measure['min'][1] for i in measure['axes'][0]], 'r')
axes.set_xlabel(measure['units'][0])
axes.set_ylabel(measure['units'][1])
elif measure['type'] == 'value':
width = 1
axes.bar([0.5], measure['value'], width=width)
axes.set_ylabel(measure['units'])
axes.set_xticks([1])
axes.set_xlim(0, 2)
axes.set_xticklabels([measure['name']])
elif measure['type'] == 'bar':
axes.bar(measure['axes'][0], measure['axes'][1], width=measure['width'])
axes.plot(measure['axes'][0], [measure['mean'] for i in measure['axes'][0]], 'g')
axes.plot(measure['axes'][0], [measure['max'][1] for i in measure['axes'][0]], 'r')
axes.plot(measure['axes'][0], [measure['min'][1] for i in measure['axes'][0]], 'r')
axes.set_xlabel(measure['units'][0])
axes.set_ylabel(measure['units'][1])
elif measure['type'] == 'videoframes':
axes.bar(measure['axes'][0], measure['axes'][1]['B'], width=1, color='g')
axes.bar(measure['axes'][0], measure['axes'][1]['P'], width=1, color='b')
axes.bar(measure['axes'][0], measure['axes'][1]['I'], width=1, color='r')
axes.set_xlabel(measure['units'][0])
axes.set_ylabel(measure['units'][1])
self.results_tab.Show()
class Plot(wx.Panel):
'''
Plot panel.
'''
def __init__(self, parent, id = -1, dpi = None, **kwargs):
wx.Panel.__init__(self, parent, id=id, **kwargs)
self.figure = mpl.figure.Figure(dpi=dpi, figsize=(2,2))
self.canvas = Canvas(self, -1, self.figure)
self.toolbar = Toolbar(self.canvas)
self.toolbar.Realize()
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.canvas,1,wx.EXPAND)
sizer.Add(self.toolbar, 0 , wx.LEFT | wx.EXPAND)
self.SetSizer(sizer)
class PlotNotebook(wx.Panel):
'''
Tab-style plotting panel.
'''
def __init__(self, parent, id = -1):
wx.Panel.__init__(self, parent, id=id)
self.nb = wx.aui.AuiNotebook(self)
sizer = wx.BoxSizer()
sizer.Add(self.nb, 1, wx.EXPAND)
self.SetSizer(sizer)
self.pages = []
def add(self, name='plot'):
'''
Add a tab.
'''
page = Plot(self.nb)
self.pages.append(page)
self.nb.AddPage(page, name)
return page.figure
def removePages(self):
'''
Remove all tabs.
'''
for page in self.pages:
self.nb.DeletePage(0)
class VTApp(wx.App):
'''
WxPython application class.
'''
def __init__(self, conf):
self.conf = conf
wx.App.__init__(self)
def OnInit(self):
vtframe = VTframe(self.conf)
self.SetTopWindow(vtframe)
vtframe.Show()
return True
| gpl-3.0 |
vidartf/hyperspy | hyperspy/drawing/_widgets/scalebar.py | 2 | 5335 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from hyperspy.misc.math_tools import closest_nice_number
class ScaleBar(object):
def __init__(self, ax, units, pixel_size=None, color='white',
position=None, max_size_ratio=0.25, lw=2, length=None,
animated=False):
"""Add a scale bar to an image.
        Parameters
        ----------
ax : matplotlib axes
The axes where to draw the scale bar.
units : string
pixel_size : {None, float}
If None the axes of the image are supposed to be calibrated.
Otherwise the pixel size must be specified.
color : a valid matplotlib color
position {None, (float, float)}
If None the position is automatically determined.
max_size_ratio : float
The maximum size of the scale bar in respect to the
length of the x axis
lw : int
The line width
length : {None, float}
If None the length is automatically calculated using the
max_size_ratio.
"""
self.animated = animated
self.ax = ax
self.units = units
self.pixel_size = pixel_size
self.xmin, self.xmax = ax.get_xlim()
self.ymin, self.ymax = ax.get_ylim()
self.text = None
self.line = None
self.tex_bold = False
if length is None:
self.calculate_size(max_size_ratio=max_size_ratio)
else:
self.length = length
if position is None:
self.position = self.calculate_line_position()
else:
self.position = position
self.calculate_text_position()
self.plot_scale(line_width=lw)
self.set_color(color)
def get_units_string(self):
if self.tex_bold is True:
            if self.units[0] == '$' and self.units[-1] == '$':
return r'$\mathbf{%g\,%s}$' % \
(self.length, self.units[1:-1])
else:
return r'$\mathbf{%g\,}$\textbf{%s}' % \
(self.length, self.units)
else:
return r'$%g\,$%s' % (self.length, self.units)
def calculate_line_position(self, pad=0.05):
return ((1 - pad) * self.xmin + pad * self.xmax,
(1 - pad) * self.ymin + pad * self.ymax)
def calculate_text_position(self, pad=1 / 100.):
ps = self.pixel_size if self.pixel_size is not None else 1
x1, y1 = self.position
x2, y2 = x1 + self.length / ps, y1
self.text_position = ((x1 + x2) / 2.,
y2 + (self.ymax - self.ymin) / ps * pad)
def calculate_size(self, max_size_ratio=0.25):
ps = self.pixel_size if self.pixel_size is not None else 1
size = closest_nice_number(ps * (self.xmax - self.xmin) *
max_size_ratio)
self.length = size
def remove(self):
if self.line is not None:
self.ax.lines.remove(self.line)
if self.text is not None:
self.ax.texts.remove(self.text)
def plot_scale(self, line_width=1):
self.remove()
ps = self.pixel_size if self.pixel_size is not None else 1
x1, y1 = self.position
x2, y2 = x1 + self.length / ps, y1
self.line, = self.ax.plot([x1, x2], [y1, y2],
linestyle='-',
lw=line_width,
animated=self.animated)
self.text = self.ax.text(*self.text_position,
s=self.get_units_string(),
ha='center',
size='medium',
animated=self.animated)
self.ax.set_xlim(self.xmin, self.xmax)
self.ax.set_ylim(self.ymin, self.ymax)
self.ax.figure.canvas.draw()
def _set_position(self, x, y):
self.position = x, y
self.calculate_text_position()
self.plot_scale(line_width=self.line.get_linewidth())
def set_color(self, c):
self.line.set_color(c)
self.text.set_color(c)
self.ax.figure.canvas.draw_idle()
def set_length(self, length):
color = self.line.get_color()
self.length = length
self.calculate_text_position()
self.plot_scale(line_width=self.line.get_linewidth())
self.set_color(color)
def set_tex_bold(self):
self.tex_bold = True
self.text.set_text(self.get_units_string())
self.ax.figure.canvas.draw_idle()
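# Editorial usage sketch, not part of the original module: attach a ScaleBar to a
# plain matplotlib image axes. The pixel size (2.5 nm/pixel) and colours are
# arbitrary example values.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.imshow(np.random.rand(128, 128), cmap='gray')
    # With pixel_size given, axis limits are taken in pixels and the bar length is
    # reported in the calibrated units.
    ScaleBar(ax, units='nm', pixel_size=2.5, color='yellow', lw=3)
    plt.show()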
| gpl-3.0 |
olologin/scikit-learn | sklearn/linear_model/tests/test_base.py | 83 | 15089 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import sparse_center_data, center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
reg.fit((X), Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [X, sparse.csc_matrix(X)]
for X in args:
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
expected_X_norm = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
n_samples = 200
n_features = 2
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt.A, XA / expected_X_norm)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
# Test output format of _preprocess_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = _preprocess_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
@ignore_warnings # all deprecation warnings
def test_deprecation_center_data():
n_samples = 200
n_features = 2
w = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
param_grid = product([True, False], [True, False], [True, False],
[None, w])
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
XX = X.copy() # such that we can try copy=False as well
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
XX = X.copy()
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
assert_array_almost_equal(X1, X2)
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
# Sparse cases
X = sparse.csr_matrix(X)
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=copy, sample_weight=sample_weight)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight, return_mean=False)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
for (fit_intercept, normalize) in product([True, False], [True, False]):
X1, y1, X1_mean, X1_var, y1_mean = \
sparse_center_data(X, y, fit_intercept=fit_intercept,
normalize=normalize)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
| bsd-3-clause |
JohnWinter/ThinkStats2 | code/analytic.py | 69 | 6265 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import numpy as np
import pandas
import nsfg
import thinkplot
import thinkstats2
def ParetoMedian(xmin, alpha):
"""Computes the median of a Pareto distribution."""
return xmin * pow(2, 1/alpha)
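# Worked example of the formula above (illustrative numbers): with xmin=0.5
# and alpha=2.0 the median is 0.5 * 2**(1/2.0), roughly 0.707.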
def MakeExpoCdf():
"""Generates a plot of the exponential CDF."""
thinkplot.PrePlot(3)
for lam in [2.0, 1, 0.5]:
xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
label = r'$\lambda=%g$' % lam
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_expo_cdf',
title='Exponential CDF',
xlabel='x',
ylabel='CDF')
def ReadBabyBoom(filename='babyboom.dat'):
"""Reads the babyboom data.
filename: string
returns: DataFrame
"""
var_info = [
('time', 1, 8, int),
('sex', 9, 16, int),
('weight_g', 17, 24, int),
('minutes', 25, 32, int),
]
columns = ['name', 'start', 'end', 'type']
variables = pandas.DataFrame(var_info, columns=columns)
variables.end += 1
dct = thinkstats2.FixedWidthVariables(variables, index_base=1)
df = dct.ReadFixedWidth(filename, skiprows=59)
return df
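# Hedged usage sketch (assumes babyboom.dat sits next to this script):
#   df = ReadBabyBoom()
#   list(df.columns)   # ['time', 'sex', 'weight_g', 'minutes']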
def MakeBabyBoom():
"""Plot CDF of interarrival time on log and linear scales.
"""
# compute the interarrival times
df = ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label='actual')
thinkplot.PrePlot(cols=2)
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='minutes',
ylabel='CDF',
legend=False)
thinkplot.SubPlot(2)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel='minutes',
ylabel='CCDF',
yscale='log',
legend=False)
thinkplot.Save(root='analytic_interarrivals',
legend=False)
def MakeParetoCdf():
"""Generates a plot of the Pareto CDF."""
xmin = 0.5
thinkplot.PrePlot(3)
for alpha in [2.0, 1.0, 0.5]:
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
thinkplot.Plot(xs, ps, label=r'$\alpha=%g$' % alpha)
thinkplot.Save(root='analytic_pareto_cdf',
title='Pareto CDF',
xlabel='x',
ylabel='CDF')
def MakeParetoCdf2():
"""Generates a plot of the CDF of height in Pareto World."""
xmin = 100
alpha = 1.7
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 1000.0, n=100)
thinkplot.Plot(xs, ps)
thinkplot.Save(root='analytic_pareto_height',
title='Pareto CDF',
xlabel='height (cm)',
ylabel='CDF',
legend=False)
def MakeNormalCdf():
"""Generates a plot of the normal CDF."""
thinkplot.PrePlot(3)
mus = [1.0, 2.0, 3.0]
sigmas = [0.5, 0.4, 0.3]
for mu, sigma in zip(mus, sigmas):
xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma,
low=-1.0, high=4.0)
label = r'$\mu=%g$, $\sigma=%g$' % (mu, sigma)
thinkplot.Plot(xs, ps, label=label)
thinkplot.Save(root='analytic_normal_cdf',
title='Normal CDF',
xlabel='x',
ylabel='CDF',
loc=2)
def MakeNormalModel(weights):
"""Plot the CDF of birthweights with a normal model."""
# estimate parameters: trimming outliers yields a better fit
mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
print('Mean, Var', mu, var)
# plot the model
sigma = math.sqrt(var)
print('Sigma', sigma)
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
# plot the data
cdf = thinkstats2.Cdf(weights, label='data')
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Save(root='analytic_birthwgt_model',
title='Birth weights',
xlabel='birth weight (lbs)',
ylabel='CDF')
def MakeExampleNormalPlot():
"""Generates a sample normal probability plot.
"""
n = 1000
thinkplot.PrePlot(3)
mus = [0, 1, 5]
sigmas = [1, 1, 2]
for mu, sigma in zip(mus, sigmas):
sample = np.random.normal(mu, sigma, n)
xs, ys = thinkstats2.NormalProbability(sample)
        label = r'$\mu=%d$, $\sigma=%d$' % (mu, sigma)
thinkplot.Plot(xs, ys, label=label)
thinkplot.Save(root='analytic_normal_prob_example',
title='Normal probability plot',
xlabel='standard normal sample',
ylabel='sample values')
def MakeNormalPlot(weights, term_weights):
"""Generates a normal probability plot of birth weights."""
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = math.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color='0.8')
thinkplot.PrePlot(2)
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label='all live')
xs, ys = thinkstats2.NormalProbability(term_weights)
thinkplot.Plot(xs, ys, label='full term')
thinkplot.Save(root='analytic_birthwgt_normal',
title='Normal probability plot',
xlabel='Standard deviations from mean',
ylabel='Birth weight (lbs)')
def main():
thinkstats2.RandomSeed(18)
MakeExampleNormalPlot()
# make the analytic CDFs
MakeExpoCdf()
MakeBabyBoom()
MakeParetoCdf()
MakeParetoCdf2()
MakeNormalCdf()
# test the distribution of birth weights for normality
preg = nsfg.ReadFemPreg()
full_term = preg[preg.prglngth >= 37]
weights = preg.totalwgt_lb.dropna()
term_weights = full_term.totalwgt_lb.dropna()
MakeNormalModel(weights)
MakeNormalPlot(weights, term_weights)
if __name__ == "__main__":
main()
| gpl-3.0 |
kashif/scikit-learn | sklearn/utils/tests/test_extmath.py | 19 | 21979 | # Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structured approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.01)
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method
# with iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5,
power_iteration_normalizer=normalizer)
# the iterated power method is still managing to get most of the
# structure at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
    # randomized_svd with power_iteration_normalizer='none' diverges for a
# large number of power iterations on this dataset
rng = np.random.RandomState(42)
X = make_low_rank_matrix(300, 1000, effective_rank=50, random_state=rng)
X += 3 * rng.randint(0, 2, size=X.shape)
n_components = 50
# Check that it diverges with many (non-normalized) power iterations
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
U, s, V = randomized_svd(X, n_components, n_iter=20,
power_iteration_normalizer='none')
A = X - U.dot(np.diag(s).dot(V))
error_20 = linalg.norm(A, ord='fro')
print(error_2 - error_20)
assert_greater(np.abs(error_2 - error_20), 100)
for normalizer in ['LU', 'QR', 'auto']:
U, s, V = randomized_svd(X, n_components, n_iter=2,
power_iteration_normalizer=normalizer)
A = X - U.dot(np.diag(s).dot(V))
error_2 = linalg.norm(A, ord='fro')
for i in [5, 10, 50]:
U, s, V = randomized_svd(X, n_components, n_iter=i,
power_iteration_normalizer=normalizer)
A = X - U.dot(np.diag(s).dot(V))
error = linalg.norm(A, ord='fro')
print(error_2 - error)
assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
# ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
# ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
# ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
# min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = \
_incremental_mean_and_var(X2, old_means, old_variances,
old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
@skip_if_32bit
def test_incremental_variance_numerical_stability():
# Test Youngs and Cramer incremental variance formulas.
def np_var(A):
return A.var(axis=0)
# Naive one pass variance computation - not numerically stable
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
def one_pass_var(X):
n = X.shape[0]
exp_x2 = (X ** 2).sum(axis=0) / n
expx_2 = (X.sum(axis=0) / n) ** 2
return exp_x2 - expx_2
# Two-pass algorithm, stable.
# We use it as a benchmark. It is not an online algorithm
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
def two_pass_var(X):
mean = X.mean(axis=0)
Y = X.copy()
return np.mean((Y - mean)**2, axis=0)
# Naive online implementation
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks of size 1
def naive_mean_variance_update(x, last_mean, last_variance,
last_sample_count):
updated_sample_count = (last_sample_count + 1)
samples_ratio = last_sample_count / float(updated_sample_count)
updated_mean = x / updated_sample_count + last_mean * samples_ratio
updated_variance = last_variance * samples_ratio + \
(x - last_mean) * (x - updated_mean) / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
# We want to show a case when one_pass_var has error > 1e-3 while
# _batch_mean_variance_update has less.
tol = 200
n_features = 2
n_samples = 10000
x1 = np.array(1e8, dtype=np.float64)
x2 = np.log(1e-5, dtype=np.float64)
A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
A = np.vstack((A0, A1))
# Older versions of numpy have different precision
    # In some old versions, np.var is not stable
if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
stable_var = np_var
else:
stable_var = two_pass_var
# Naive one pass var: >tol (=1063)
assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
# Starting point for online algorithms: after A0
# Naive implementation: >tol (436)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
naive_mean_variance_update(A1[i, :], mean, var, n)
assert_equal(n, A.shape[0])
# the mean is also slightly unstable
assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
assert_greater(np.abs(stable_var(A) - var).max(), tol)
# Robust implementation: <tol (177)
mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
for i in range(A1.shape[0]):
mean, var, n = \
_incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
mean, var, n)
assert_equal(n, A.shape[0])
assert_array_almost_equal(A.mean(axis=0), mean)
assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _incremental_mean_and_var(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
def test_softmax():
rng = np.random.RandomState(0)
X = rng.randn(3, 5)
exp_X = np.exp(X)
sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
| bsd-3-clause |
HasanIssa88/EMG_Classification | Linear _Reg_Max.py | 1 | 1805 | import sklearn
import sklearn.cross_validation
import scipy.stats as stats
from sklearn.datasets import load_boston
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
import matplotlib.pylab as plt
df=pd.read_csv('02_02_precision_max_C_1.txt',sep=',',skiprows=13,header=None,na_values="null",delimiter=',')
df.columns=['Force_sensor','EMG_radial_1','EMG_radial_2','EMG_radial_3','EMG_radial_4',
'EMG_radial_5','EMG_radial_6','EMG_special_1','EMG_special_2','EMG_special_3','EMG_special_4']
df2=pd.read_csv('02_01_precision_05_050.txt',sep=',',skiprows=13,header=None,na_values="null",delimiter=',')
df2.columns=['Force_sensor','EMG_radial_1','EMG_radial_2','EMG_radial_3','EMG_radial_4',
'EMG_radial_5','EMG_radial_6','EMG_special_1','EMG_special_2','EMG_special_3','EMG_special_4']
# Dropping the force sensor and the special EMG channels (keep radial channels as features)
X=df.drop(['Force_sensor','EMG_special_4','EMG_special_2','EMG_special_3','EMG_special_1'],inplace=False,axis=1,errors='ignore')
X_train, X_test,Y_train,Y_test=sklearn.cross_validation.train_test_split(X,df.Force_sensor,test_size=0.33,random_state=5)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
lm=LinearRegression()
lm.fit(X_train, Y_train)
pred_train=lm.predict(X_train)
pred_test=lm.predict(X_test)
print('Fit a model X_train, and calculate MSE with Y_train:', np.mean((Y_train - lm.predict(X_train))**2))
print('Fit a model X_train, and calculate MSE with X_test, Y_test:', np.mean((Y_test - lm.predict(X_test))**2))
plt.scatter(lm.predict(X_train),lm.predict(X_train)-Y_train,c='b',s=40,alpha=0.5)
plt.scatter(lm.predict(X_test),lm.predict(X_test)-Y_test,c='g',s=40)
plt.hlines(y=0,xmin=0,xmax=50)
plt.title('Residual plot using training (BLUE) and test(GREEN) data')
plt.ylabel('Residuals')
plt.show()
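# Optional follow-up (sketch): report the coefficient of determination on the
# held-out split as well; LinearRegression.score returns R^2 in scikit-learn.
print('R^2 on the test split:', lm.score(X_test, Y_test))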
| gpl-3.0 |
mne-tools/mne-tools.github.io | 0.21/_downloads/5f5e63d32397437d1a83eaaa5e2e20fd/plot_read_proj.py | 5 | 2128 |
"""
==============================================
Read and visualize projections (SSP and other)
==============================================
This example shows how to read and visualize Signal Subspace Projectors (SSP)
vector. Such projections are sometimes referred to as PCA projections.
"""
# Author: Joan Massich <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import read_proj
from mne.io import read_raw_fif
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif'
###############################################################################
# Load the FIF file and display the projections present in the file. Here the
# projections are added to the file during the acquisition and are obtained
# from empty room recordings.
raw = read_raw_fif(fname)
empty_room_proj = raw.info['projs']
# Display the projections stored in `info['projs']` from the raw object
raw.plot_projs_topomap()
###############################################################################
# Display the projections one by one
n_cols = len(empty_room_proj)
fig, axes = plt.subplots(1, n_cols, figsize=(2 * n_cols, 2))
for proj, ax in zip(empty_room_proj, axes):
proj.plot_topomap(axes=ax, info=raw.info)
###############################################################################
# Use the function in `mne.viz` to display a list of projections
assert isinstance(empty_room_proj, list)
mne.viz.plot_projs_topomap(empty_room_proj, info=raw.info)
###############################################################################
# .. TODO: add this when the tutorial is up: "As shown in the tutorial
# :doc:`../auto_tutorials/preprocessing/plot_projectors`, ..."
#
# The ECG projections can be loaded from a file and added to the raw object
# read the projections
ecg_projs = read_proj(ecg_fname)
# add them to raw and plot everything
raw.add_proj(ecg_projs)
raw.plot_projs_topomap()
| bsd-3-clause |
Dewpal/Humidistat | DHT22.py | 1 | 1400 | #!/usr/bin/python
import sys
import time
import datetime
import Adafruit_DHT
import plotly
import pandas as pd
import plotly.plotly as py
import plotly.graph_objs as go
# Sensor type and data pin.
sensor = Adafruit_DHT.DHT22
pin = 21
# connect to plotly
plotly.tools.set_credentials_file(username='bramDeJaeg', api_key='jemhzjyun0')
# Parameters for data storage
Ts= 1; # sampling time (s)
nStore= 5 # number of datapoints to store
i=1
data= pd.DataFrame({'Time': 0,'Temperature': 0,'Humidity': 0}, columns=['Time','Temperature','Humidity'],index=range(0,nStore-1))
for i in range(0,nStore-1):
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None and temperature is not None:
data.loc[i]=pd.Series({'Time': datetime.datetime.now(),'Temperature': temperature,'Humidity': humidity})
else:
print('missed reading')
time.sleep(Ts)
i=i+1
trace=go.Scatter(
x= data.Time,
y= data.Humidity,
stream=dict(
token= "0f1psssxtu",
maxpoints= 200
)
)
layout = go.Layout(
title='RPi, DHT-sensor Data'
)
fig=go.Figure(data=[trace], layout=layout)
py.plot(fig,filename = 'basic_TH',)
stream=py.Stream('0f1psssxtu')
stream.open()
while True:
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
i=i+1
stream.write({'x': datetime.datetime.now(), 'y': humidity})
time.sleep(Ts)
| mit |
HoverHell/mplh5canvas | examples/multi_plot.py | 4 | 1357 | #!/usr/bin/python
"""Testbed for the animation functionality of the backend, with multiple figures.
It basically produces an long series of frames that get animated on the client
browser side, this time with two figures.
"""
import matplotlib
matplotlib.use('module://mplh5canvas.backend_h5canvas')
from pylab import *
import time
def refresh_data(ax):
t = arange(0.0 + count, 2.0 + count, 0.01)
s = sin(2*pi*t)
ax.lines[0].set_xdata(t)
ax.lines[0].set_ydata(s)
ax.set_xlim(t[0],t[-1])
t = arange(0.0, 2.0, 0.01)
s = sin(2*pi*t)
plot(t, s, linewidth=1.0)
xlabel('time (s)')
ylabel('voltage (mV)')
title('Frist Post')
f = gcf()
ax = f.gca()
count = 0
f2 = figure()
ax2 = f2.gca()
ax2.set_xlabel('IMDB rating')
ax2.set_ylabel('South African Connections')
ax2.set_title('Luds chart...')
ax2.plot(arange(0.0, 5 + count, 0.01), arange(0.0, 5 + count, 0.01))
show(block=False, layout=2)
# show the figure manager but don't block script execution so animation works..
# layout=2 overrides the default layout manager which only shows a single plot in the browser window
while True:
refresh_data(ax)
d = arange(0.0, 5 + count, 0.01)
ax2.lines[0].set_xdata(d)
ax2.lines[0].set_ydata(d)
ax2.set_xlim(d[0],d[-1])
ax2.set_ylim(d[0],d[-1])
f.canvas.draw()
f2.canvas.draw()
count += 0.01
time.sleep(1)
| bsd-3-clause |
tjlaboss/openmc | openmc/data/resonance_covariance.py | 10 | 27136 | from collections.abc import MutableSequence
import warnings
import io
import copy
import numpy as np
import pandas as pd
from . import endf
import openmc.checkvalue as cv
from .resonance import Resonances
def _add_file2_contributions(file32params, file2params):
"""Function for aiding in adding resonance parameters from File 2 that are
not always present in File 32. Uses already imported resonance data.
    Parameters
----------
file32params : pandas.Dataframe
Incomplete set of resonance parameters contained in File 32.
file2params : pandas.Dataframe
Resonance parameters from File 2. Ordered by energy.
Returns
-------
parameters : pandas.Dataframe
Complete set of parameters ordered by L-values and then energy
"""
# Use l-values and competitiveWidth from File 2 data
# Re-sort File 2 by energy to match File 32
file2params = file2params.sort_values(by=['energy'])
file2params.reset_index(drop=True, inplace=True)
# Sort File 32 parameters by energy as well (maintaining index)
file32params.sort_values(by=['energy'], inplace=True)
# Add in values (.values converts to array first to ignore index)
file32params['L'] = file2params['L'].values
if 'competitiveWidth' in file2params.columns:
file32params['competitiveWidth'] = file2params['competitiveWidth'].values
# Resort to File 32 order (by L then by E) for use with covariance
file32params.sort_index(inplace=True)
return file32params
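# Hedged illustration of the merge above with tiny made-up frames (not ENDF
# data); File 32 keeps its own (L, energy) row order while picking up the 'L'
# column (and 'competitiveWidth', when present) from File 2:
#   f32 = pd.DataFrame({'energy': [2.0, 1.0]}, index=[1, 0])
#   f2 = pd.DataFrame({'energy': [1.0, 2.0], 'L': [0, 1]})
#   merged = _add_file2_contributions(f32, f2)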
class ResonanceCovariances(Resonances):
"""Resolved resonance covariance data
Parameters
----------
ranges : list of openmc.data.ResonanceCovarianceRange
Distinct energy ranges for resonance data
Attributes
----------
ranges : list of openmc.data.ResonanceCovarianceRange
Distinct energy ranges for resonance data
"""
@property
def ranges(self):
return self._ranges
@ranges.setter
def ranges(self, ranges):
cv.check_type('resonance ranges', ranges, MutableSequence)
self._ranges = cv.CheckedList(ResonanceCovarianceRange,
'resonance range', ranges)
@classmethod
def from_endf(cls, ev, resonances):
"""Generate resonance covariance data from an ENDF evaluation.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
resonances : openmc.data.Resonance object
            openmc.data.Resonance object generated from the same evaluation
used to import values not contained in File 32
Returns
-------
openmc.data.ResonanceCovariances
Resonance covariance data
"""
file_obj = io.StringIO(ev.section[32, 151])
# Determine whether discrete or continuous representation
items = endf.get_head_record(file_obj)
n_isotope = items[4] # Number of isotopes
ranges = []
for iso in range(n_isotope):
items = endf.get_cont_record(file_obj)
abundance = items[1]
fission_widths = (items[3] == 1) # Flag for fission widths
n_ranges = items[4] # Number of resonance energy ranges
for j in range(n_ranges):
items = endf.get_cont_record(file_obj)
# Unresolved flags - 0: only scattering radius given
# 1: resolved parameters given
# 2: unresolved parameters given
unresolved_flag = items[2]
formalism = items[3] # resonance formalism
# Throw error for unsupported formalisms
if formalism in [0, 7]:
error = 'LRF='+str(formalism)+' covariance not supported '\
'for this formalism'
raise NotImplementedError(error)
if unresolved_flag in (0, 1):
# Resolved resonance region
resonance = resonances.ranges[j]
erange = _FORMALISMS[formalism].from_endf(ev, file_obj,
items, resonance)
ranges.append(erange)
elif unresolved_flag == 2:
warn = 'Unresolved resonance not supported. Covariance '\
'values for the unresolved region not imported.'
warnings.warn(warn)
return cls(ranges)
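    # Hedged usage sketch; the ENDF file name below is a placeholder:
    #   ev = endf.Evaluation('n-092_U_235.endf')
    #   res = Resonances.from_endf(ev)
    #   res_cov = ResonanceCovariances.from_endf(ev, res)
    # `res_cov.ranges` then holds one covariance range per resolved resonance
    # energy range found in MF=32, MT=151.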
class ResonanceCovarianceRange:
"""Resonace covariance range. Base class for different formalisms.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
mpar : int
Number of parameters in covariance matrix for each individual resonance
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max):
self.energy_min = energy_min
self.energy_max = energy_max
def subset(self, parameter_str, bounds):
"""Produce a subset of resonance parameters and the corresponding
covariance matrix to an IncidentNeutron object.
Parameters
----------
parameter_str : str
parameter to be discriminated
(i.e. 'energy', 'captureWidth', 'fissionWidthA'...)
bounds : np.array
[low numerical bound, high numerical bound]
Returns
-------
res_cov_range : openmc.data.ResonanceCovarianceRange
ResonanceCovarianceRange object that contains a subset of the
covariance matrix (upper triangular) as well as a subset parameters
within self.file2params
"""
# Copy range and prevent change of original
res_cov_range = copy.deepcopy(self)
parameters = self.file2res.parameters
cov = res_cov_range.covariance
mpar = res_cov_range.mpar
# Create mask
mask1 = parameters[parameter_str] >= bounds[0]
mask2 = parameters[parameter_str] <= bounds[1]
mask = mask1 & mask2
res_cov_range.parameters = parameters[mask]
indices = res_cov_range.parameters.index.values
# Build subset of covariance
sub_cov_dim = len(indices)*mpar
cov_subset_vals = []
for index1 in indices:
for i in range(mpar):
for index2 in indices:
for j in range(mpar):
if index2*mpar+j >= index1*mpar+i:
cov_subset_vals.append(cov[index1*mpar+i,
index2*mpar+j])
cov_subset = np.zeros([sub_cov_dim, sub_cov_dim])
tri_indices = np.triu_indices(sub_cov_dim)
cov_subset[tri_indices] = cov_subset_vals
res_cov_range.file2res.parameters = parameters[mask]
res_cov_range.covariance = cov_subset
return res_cov_range
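    # Hedged usage sketch, assuming `rcov` is one entry of a
    # ResonanceCovariances.ranges list built from an ENDF evaluation:
    #   sub = rcov.subset('energy', [1.0, 200.0])   # bounds are illustrative
    #   sub.parameters   # File 32 parameters whose energies fall in the bounds
    #   sub.covariance   # matching upper-triangular covariance block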
def sample(self, n_samples):
"""Sample resonance parameters based on the covariances provided
within an ENDF evaluation.
Parameters
----------
n_samples : int
The number of samples to produce
Returns
-------
samples : list of openmc.data.ResonanceCovarianceRange objects
List of samples size `n_samples`
"""
warn_str = 'Sampling routine does not guarantee positive values for '\
'parameters. This can lead to undefined behavior in the '\
'reconstruction routine.'
warnings.warn(warn_str)
parameters = self.parameters
cov = self.covariance
# Symmetrizing covariance matrix
cov = cov + cov.T - np.diag(cov.diagonal())
formalism = self.formalism
mpar = self.mpar
samples = []
# Handling MLBW/SLBW sampling
if formalism == 'mlbw' or formalism == 'slbw':
params = ['energy', 'neutronWidth', 'captureWidth', 'fissionWidth',
'competitiveWidth']
param_list = params[:mpar]
mean_array = parameters[param_list].values
mean = mean_array.flatten()
par_samples = np.random.multivariate_normal(mean, cov,
size=n_samples)
spin = parameters['J'].values
l_value = parameters['L'].values
for sample in par_samples:
energy = sample[0::mpar]
gn = sample[1::mpar]
gg = sample[2::mpar]
gf = sample[3::mpar] if mpar > 3 else parameters['fissionWidth'].values
gx = sample[4::mpar] if mpar > 4 else parameters['competitiveWidth'].values
gt = gn + gg + gf + gx
records = []
for j, E in enumerate(energy):
records.append([energy[j], l_value[j], spin[j], gt[j],
gn[j], gg[j], gf[j], gx[j]])
columns = ['energy', 'L', 'J', 'totalWidth', 'neutronWidth',
'captureWidth', 'fissionWidth', 'competitiveWidth']
sample_params = pd.DataFrame.from_records(records,
columns=columns)
# Copy ResonanceRange object
res_range = copy.copy(self.file2res)
res_range.parameters = sample_params
samples.append(res_range)
# Handling RM sampling
elif formalism == 'rm':
params = ['energy', 'neutronWidth', 'captureWidth',
'fissionWidthA', 'fissionWidthB']
param_list = params[:mpar]
mean_array = parameters[param_list].values
mean = mean_array.flatten()
par_samples = np.random.multivariate_normal(mean, cov,
size=n_samples)
spin = parameters['J'].values
l_value = parameters['L'].values
for sample in par_samples:
energy = sample[0::mpar]
gn = sample[1::mpar]
gg = sample[2::mpar]
gfa = sample[3::mpar] if mpar > 3 else parameters['fissionWidthA'].values
                gfb = sample[4::mpar] if mpar > 4 else parameters['fissionWidthB'].values
records = []
for j, E in enumerate(energy):
records.append([energy[j], l_value[j], spin[j], gn[j],
gg[j], gfa[j], gfb[j]])
columns = ['energy', 'L', 'J', 'neutronWidth',
'captureWidth', 'fissionWidthA', 'fissionWidthB']
sample_params = pd.DataFrame.from_records(records,
columns=columns)
# Copy ResonanceRange object
res_range = copy.copy(self.file2res)
res_range.parameters = sample_params
samples.append(res_range)
return samples
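    # Hedged usage sketch: draw 10 correlated realizations of the resonance
    # parameters; each entry is a copy of the File 2 ResonanceRange whose
    # `parameters` frame holds one sampled set.
    #   realizations = rcov.sample(10)
    #   realizations[0].parameters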
class MultiLevelBreitWignerCovariance(ResonanceCovarianceRange):
"""Multi-level Breit-Wigner resolved resonance formalism covariance data.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
mpar : int
Number of parameters in covariance matrix for each individual resonance
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max)
self.parameters = parameters
self.covariance = covariance
self.mpar = mpar
self.lcomp = lcomp
self.file2res = copy.copy(file2res)
self.formalism = 'mlbw'
@classmethod
def from_endf(cls, ev, file_obj, items, resonance):
"""Create MLBW covariance data from an ENDF evaluation.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
file_obj : file-like object
ENDF file positioned at the second record of a resonance range
subsection in MF=32, MT=151
items : list
Items from the CONT record at the start of the resonance range
subsection
resonance : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
Returns
-------
openmc.data.MultiLevelBreitWignerCovariance
Multi-level Breit-Wigner resonance covariance parameters
"""
# Read energy-dependent scattering radius if present
energy_min, energy_max = items[0:2]
nro, naps = items[4:6]
if nro != 0:
params, ape = endf.get_tab1_record(file_obj)
# Other scatter radius parameters
items = endf.get_cont_record(file_obj)
target_spin = items[0]
lcomp = items[3] # Flag for compatibility 0, 1, 2 - 2 is compact form
nls = items[4] # number of l-values
# Build covariance matrix for General Resolved Resonance Formats
if lcomp == 1:
items = endf.get_cont_record(file_obj)
# Number of short range type resonance covariances
num_short_range = items[4]
# Number of long range type resonance covariances
num_long_range = items[5]
# Read resonance widths, J values, etc
records = []
for i in range(num_short_range):
items, values = endf.get_list_record(file_obj)
mpar = items[2]
num_res = items[5]
num_par_vals = num_res*6
res_values = values[:num_par_vals]
cov_values = values[num_par_vals:]
energy = res_values[0::6]
spin = res_values[1::6]
gt = res_values[2::6]
gn = res_values[3::6]
gg = res_values[4::6]
gf = res_values[5::6]
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gt[i], gn[i],
gg[i], gf[i]])
# Build the upper-triangular covariance matrix
cov_dim = mpar*num_res
cov = np.zeros([cov_dim, cov_dim])
indices = np.triu_indices(cov_dim)
cov[indices] = cov_values
# Compact format - Resonances and individual uncertainties followed by
# compact correlations
elif lcomp == 2:
items, values = endf.get_list_record(file_obj)
mean = items
num_res = items[5]
energy = values[0::12]
spin = values[1::12]
gt = values[2::12]
gn = values[3::12]
gg = values[4::12]
gf = values[5::12]
par_unc = []
for i in range(num_res):
res_unc = values[i*12+6 : i*12+12]
# Delete 0 values (not provided, no fission width)
# DAJ/DGT always zero, DGF sometimes nonzero [1, 2, 5]
res_unc_nonzero = []
for j in range(6):
if j in [1, 2, 5] and res_unc[j] != 0.0:
res_unc_nonzero.append(res_unc[j])
elif j in [0, 3, 4]:
res_unc_nonzero.append(res_unc[j])
par_unc.extend(res_unc_nonzero)
records = []
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gt[i], gn[i],
gg[i], gf[i]])
corr = endf.get_intg_record(file_obj)
cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))
# Compatible resolved resonance format
elif lcomp == 0:
cov = np.zeros([4, 4])
records = []
cov_index = 0
for i in range(nls):
items, values = endf.get_list_record(file_obj)
num_res = items[5]
for j in range(num_res):
one_res = values[18*j:18*(j+1)]
res_values = one_res[:6]
cov_values = one_res[6:]
records.append(list(res_values))
                    # Populate the covariance matrix for this resonance
# There are no covariances between resonances in lcomp=0
cov[cov_index, cov_index] = cov_values[0]
cov[cov_index+1, cov_index+1 : cov_index+2] = cov_values[1:2]
cov[cov_index+1, cov_index+3] = cov_values[4]
cov[cov_index+2, cov_index+2] = cov_values[3]
cov[cov_index+2, cov_index+3] = cov_values[5]
cov[cov_index+3, cov_index+3] = cov_values[6]
cov_index += 4
if j < num_res-1: # Pad matrix for additional values
cov = np.pad(cov, ((0, 4), (0, 4)), 'constant',
constant_values=0)
# Create pandas DataFrame with resonance data, currently
# redundant with data.IncidentNeutron.resonance
columns = ['energy', 'J', 'totalWidth', 'neutronWidth',
'captureWidth', 'fissionWidth']
parameters = pd.DataFrame.from_records(records, columns=columns)
# Determine mpar (number of parameters for each resonance in
# covariance matrix)
nparams, params = parameters.shape
covsize = cov.shape[0]
mpar = int(covsize/nparams)
# Add parameters from File 2
parameters = _add_file2_contributions(parameters,
resonance.parameters)
# Create instance of class
mlbw = cls(energy_min, energy_max, parameters, cov, mpar, lcomp,
resonance)
return mlbw
class SingleLevelBreitWignerCovariance(MultiLevelBreitWignerCovariance):
"""Single-level Breit-Wigner resolved resonance formalism covariance data.
Single-level Breit-Wigner resolved resonance data is is identified by LRF=1
in the ENDF-6 format.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
mpar : int
Number of parameters in covariance matrix for each individual resonance
formalism : str
String descriptor of formalism
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res)
self.formalism = 'slbw'
class ReichMooreCovariance(ResonanceCovarianceRange):
"""Reich-Moore resolved resonance formalism covariance data.
Reich-Moore resolved resonance data is identified by LRF=3 in the ENDF-6
format.
Parameters
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
Attributes
----------
energy_min : float
Minimum energy of the resolved resonance range in eV
energy_max : float
Maximum energy of the resolved resonance range in eV
parameters : pandas.DataFrame
Resonance parameters
covariance : numpy.array
The covariance matrix contained within the ENDF evaluation
lcomp : int
Flag indicating format of the covariance matrix within the ENDF file
mpar : int
Number of parameters in covariance matrix for each individual resonance
file2res : openmc.data.ResonanceRange object
Corresponding resonance range with File 2 data.
formalism : str
String descriptor of formalism
"""
def __init__(self, energy_min, energy_max, parameters, covariance, mpar,
lcomp, file2res):
super().__init__(energy_min, energy_max)
self.parameters = parameters
self.covariance = covariance
self.mpar = mpar
self.lcomp = lcomp
self.file2res = copy.copy(file2res)
self.formalism = 'rm'
@classmethod
def from_endf(cls, ev, file_obj, items, resonance):
"""Create Reich-Moore resonance covariance data from an ENDF
evaluation. Includes the resonance parameters contained separately in
File 32.
Parameters
----------
ev : openmc.data.endf.Evaluation
ENDF evaluation
file_obj : file-like object
ENDF file positioned at the second record of a resonance range
subsection in MF=2, MT=151
items : list
Items from the CONT record at the start of the resonance range
subsection
resonance : openmc.data.Resonance object
            openmc.data.Resonance object generated from the same evaluation
used to import values not contained in File 32
Returns
-------
openmc.data.ReichMooreCovariance
Reich-Moore resonance covariance parameters
"""
# Read energy-dependent scattering radius if present
energy_min, energy_max = items[0:2]
nro, naps = items[4:6]
if nro != 0:
params, ape = endf.get_tab1_record(file_obj)
# Other scatter radius parameters
items = endf.get_cont_record(file_obj)
target_spin = items[0]
lcomp = items[3] # Flag for compatibility 0, 1, 2 - 2 is compact form
nls = items[4] # Number of l-values
# Build covariance matrix for General Resolved Resonance Formats
if lcomp == 1:
items = endf.get_cont_record(file_obj)
# Number of short range type resonance covariances
num_short_range = items[4]
# Number of long range type resonance covariances
num_long_range = items[5]
# Read resonance widths, J values, etc
channel_radius = {}
scattering_radius = {}
records = []
for i in range(num_short_range):
items, values = endf.get_list_record(file_obj)
mpar = items[2]
num_res = items[5]
num_par_vals = num_res*6
res_values = values[:num_par_vals]
cov_values = values[num_par_vals:]
energy = res_values[0::6]
spin = res_values[1::6]
gn = res_values[2::6]
gg = res_values[3::6]
gfa = res_values[4::6]
gfb = res_values[5::6]
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gn[i], gg[i],
gfa[i], gfb[i]])
# Build the upper-triangular covariance matrix
cov_dim = mpar*num_res
cov = np.zeros([cov_dim, cov_dim])
indices = np.triu_indices(cov_dim)
cov[indices] = cov_values
# Compact format - Resonances and individual uncertainties followed by
# compact correlations
elif lcomp == 2:
items, values = endf.get_list_record(file_obj)
num_res = items[5]
energy = values[0::12]
spin = values[1::12]
gn = values[2::12]
gg = values[3::12]
gfa = values[4::12]
gfb = values[5::12]
par_unc = []
for i in range(num_res):
res_unc = values[i*12+6 : i*12+12]
# Delete 0 values (not provided in evaluation)
res_unc = [x for x in res_unc if x != 0.0]
par_unc.extend(res_unc)
records = []
for i, E in enumerate(energy):
records.append([energy[i], spin[i], gn[i], gg[i],
gfa[i], gfb[i]])
corr = endf.get_intg_record(file_obj)
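            # As in the Breit-Wigner case above, convert the compact correlation
            # matrix to a covariance matrix: cov = D * corr * D, with D the
            # diagonal matrix of parameter uncertainties.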
cov = np.diag(par_unc).dot(corr).dot(np.diag(par_unc))
        # Create pandas DataFrame with resonance data
columns = ['energy', 'J', 'neutronWidth', 'captureWidth',
'fissionWidthA', 'fissionWidthB']
parameters = pd.DataFrame.from_records(records, columns=columns)
# Determine mpar (number of parameters for each resonance in
# covariance matrix)
nparams, params = parameters.shape
covsize = cov.shape[0]
mpar = int(covsize/nparams)
# Add parameters from File 2
parameters = _add_file2_contributions(parameters,
resonance.parameters)
# Create instance of ReichMooreCovariance
rmc = cls(energy_min, energy_max, parameters, cov, mpar, lcomp,
resonance)
return rmc
_FORMALISMS = {
0: ResonanceCovarianceRange,
1: SingleLevelBreitWignerCovariance,
2: MultiLevelBreitWignerCovariance,
3: ReichMooreCovariance
# 7: RMatrixLimitedCovariance
}
| mit |
khkaminska/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
btabibian/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 82 | 1671 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is
completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
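# Roughly speaking, f_regression scores each feature by the F-statistic of a
# univariate linear fit, F_i = r_i**2 / (1 - r_i**2) * (n - 2) with r_i the
# Pearson correlation between x_i and y, while mutual_info_regression relies on
# a nearest-neighbor entropy estimate and can therefore pick up the sin term.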
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y, edgecolor='black', s=20)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
rishuatgithub/MLPy | fruits_with_colors_KNNlearn.py | 1 | 1673 | #Building to Model to predict fruits data with colors using SKlearn and KNN
# Author: Rishu Shrivastava ([email protected])
# Date : June 4, 2017
import numpy as np
import matplotlib.pyplot as mp
import pandas as pd
from sklearn.model_selection import train_test_split
from matplotlib import cm
from sklearn.neighbors import KNeighborsClassifier
#reading data
fruits = pd.read_table('./data/fruit_data_with_colors.txt')
print("Displaying sample rows of the fruit data set")
print(fruits.head())
#create a mapping from fruit label value to fruit name to make results easier to interpret
print("Lookup fruit names to make it easier to interpret the prediction")
lookup_fruit_name = dict(zip(fruits.fruit_label.unique(), fruits.fruit_name.unique()))
print(lookup_fruit_name)
#selecting features and label (used for the scatter matrix plot below)
X = fruits[['height', 'width', 'mass']]
y = fruits['fruit_label']
#creating train and test data sets, split 75%/25%
print("Generating train and test dataset")
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
cmap = cm.get_cmap('gnuplot')
scatter = pd.scatter_matrix(X_train, c= y_train, marker = 'o', s=40, hist_kwds={'bins':15}, figsize=(9,9), cmap=cmap)
mp.show()
# Training the Dataset using KNN algorithm | neighbours=5
print("Training KNNeighbour Classifier")
knn = KNeighborsClassifier(n_neighbors = 5)
knn.fit(X_train, y_train)
print("The ACCURACY score = ",knn.score(X_test,y_test))
# first example: a small fruit with mass 20g, width 4.3 cm, height 5.5 cm
fruit_prediction = knn.predict([[20, 4.3, 5.5]])
print("PREDICTING fruit with mass 20g, width 4.3 cm, height 5.5 cm : ",lookup_fruit_name[fruit_prediction[0]])
| apache-2.0 |
DonBeo/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time complexity
is polynomial.
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
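# NMF approximates the tf-idf matrix X (n_samples x n_features) by the product
# W * H with W, H >= 0; each row of H (exposed as nmf.components_) is a topic,
# i.e. a set of non-negative weights over the vocabulary, which is what the
# loop at the end of the script prints.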
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/dask/dataframe/io/tests/test_csv.py | 2 | 39060 | from __future__ import print_function, division, absolute_import
from io import BytesIO
import os
import gzip
from time import sleep
import pytest
pd = pytest.importorskip('pandas')
dd = pytest.importorskip('dask.dataframe')
from toolz import partition_all, valmap
import pandas.util.testing as tm
import dask
import dask.dataframe as dd
from dask.base import compute_as_if_collection
from dask.dataframe.io.csv import (text_blocks_to_pandas, pandas_read_text,
auto_blocksize)
from dask.dataframe.utils import assert_eq, has_known_categories, PANDAS_VERSION
from dask.bytes.core import read_bytes
from dask.utils import filetexts, filetext, tmpfile, tmpdir
from dask.bytes.compression import compress, files as cfiles, seekable_files
fmt_bs = [(fmt, None) for fmt in cfiles] + [(fmt, 10) for fmt in seekable_files]
def normalize_text(s):
return '\n'.join(map(str.strip, s.strip().split('\n')))
def parse_filename(path):
return os.path.split(path)[1]
csv_text = """
name,amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
Alice,200
Frank,-200
Bob,600
Alice,400
Frank,200
Alice,300
Edith,600
""".strip()
tsv_text = csv_text.replace(',', '\t')
tsv_text2 = """
name amount
Alice 100
Bob -200
Charlie 300
Dennis 400
Edith -500
Frank 600
Alice 200
Frank -200
Bob 600
Alice 400
Frank 200
Alice 300
Edith 600
""".strip()
timeseries = """
Date,Open,High,Low,Close,Volume,Adj Close
2015-08-28,198.50,199.839996,197.919998,199.240005,143298900,199.240005
2015-08-27,197.020004,199.419998,195.210007,199.160004,266244700,199.160004
2015-08-26,192.080002,194.789993,188.369995,194.679993,328058100,194.679993
2015-08-25,195.429993,195.449997,186.919998,187.229996,353966700,187.229996
2015-08-24,197.630005,197.630005,182.399994,189.550003,478672400,189.550003
2015-08-21,201.729996,203.940002,197.520004,197.630005,328271500,197.630005
2015-08-20,206.509995,208.289993,203.899994,204.009995,185865600,204.009995
2015-08-19,209.089996,210.009995,207.350006,208.279999,167316300,208.279999
2015-08-18,210.259995,210.679993,209.699997,209.929993,70043800,209.929993
""".strip()
csv_files = {'2014-01-01.csv': (b'name,amount,id\n'
b'Alice,100,1\n'
b'Bob,200,2\n'
b'Charlie,300,3\n'),
'2014-01-02.csv': (b'name,amount,id\n'),
'2014-01-03.csv': (b'name,amount,id\n'
b'Dennis,400,4\n'
b'Edith,500,5\n'
b'Frank,600,6\n')}
tsv_files = {k: v.replace(b',', b'\t') for (k, v) in csv_files.items()}
expected = pd.concat([pd.read_csv(BytesIO(csv_files[k]))
for k in sorted(csv_files)])
comment_header = b"""# some header lines
# that may be present
# in a data file
# before any data"""
csv_and_table = pytest.mark.parametrize('reader,files',
[(pd.read_csv, csv_files),
(pd.read_table, tsv_files)])
@csv_and_table
def test_pandas_read_text(reader, files):
b = files['2014-01-01.csv']
df = pandas_read_text(reader, b, b'', {})
assert list(df.columns) == ['name', 'amount', 'id']
assert len(df) == 3
assert df.id.sum() == 1 + 2 + 3
@csv_and_table
def test_pandas_read_text_kwargs(reader, files):
b = files['2014-01-01.csv']
df = pandas_read_text(reader, b, b'', {'usecols': ['name', 'id']})
assert list(df.columns) == ['name', 'id']
@csv_and_table
def test_pandas_read_text_dtype_coercion(reader, files):
b = files['2014-01-01.csv']
df = pandas_read_text(reader, b, b'', {}, {'amount': 'float'})
assert df.amount.dtype == 'float'
@csv_and_table
def test_pandas_read_text_with_header(reader, files):
b = files['2014-01-01.csv']
header, b = b.split(b'\n', 1)
header = header + b'\n'
df = pandas_read_text(reader, b, header, {})
assert list(df.columns) == ['name', 'amount', 'id']
assert len(df) == 3
assert df.id.sum() == 1 + 2 + 3
@csv_and_table
def test_text_blocks_to_pandas_simple(reader, files):
blocks = [[files[k]] for k in sorted(files)]
kwargs = {}
head = pandas_read_text(reader, files['2014-01-01.csv'], b'', {})
header = files['2014-01-01.csv'].split(b'\n')[0] + b'\n'
df = text_blocks_to_pandas(reader, blocks, header, head, kwargs,
collection=True)
assert isinstance(df, dd.DataFrame)
assert list(df.columns) == ['name', 'amount', 'id']
values = text_blocks_to_pandas(reader, blocks, header, head, kwargs,
collection=False)
assert isinstance(values, list)
assert len(values) == 3
assert all(hasattr(item, 'dask') for item in values)
assert_eq(df.amount.sum(),
100 + 200 + 300 + 400 + 500 + 600)
@csv_and_table
def test_text_blocks_to_pandas_kwargs(reader, files):
blocks = [files[k] for k in sorted(files)]
blocks = [[b] for b in blocks]
kwargs = {'usecols': ['name', 'id']}
head = pandas_read_text(reader, files['2014-01-01.csv'], b'', kwargs)
header = files['2014-01-01.csv'].split(b'\n')[0] + b'\n'
df = text_blocks_to_pandas(reader, blocks, header, head, kwargs,
collection=True)
assert list(df.columns) == ['name', 'id']
result = df.compute()
assert (result.columns == df.columns).all()
@csv_and_table
def test_text_blocks_to_pandas_blocked(reader, files):
header = files['2014-01-01.csv'].split(b'\n')[0] + b'\n'
blocks = []
for k in sorted(files):
b = files[k]
lines = b.split(b'\n')
blocks.append([b'\n'.join(bs) for bs in partition_all(2, lines)])
df = text_blocks_to_pandas(reader, blocks, header, expected.head(), {})
assert_eq(df.compute().reset_index(drop=True),
expected.reset_index(drop=True), check_dtype=False)
expected2 = expected[['name', 'id']]
df = text_blocks_to_pandas(reader, blocks, header, expected2.head(),
{'usecols': ['name', 'id']})
assert_eq(df.compute().reset_index(drop=True),
expected2.reset_index(drop=True), check_dtype=False)
@pytest.mark.parametrize('dd_read,pd_read,files',
[(dd.read_csv, pd.read_csv, csv_files),
(dd.read_table, pd.read_table, tsv_files)])
def test_skiprows(dd_read, pd_read, files):
files = {name: comment_header + b'\n' + content for name, content in files.items()}
skip = len(comment_header.splitlines())
with filetexts(files, mode='b'):
df = dd_read('2014-01-*.csv', skiprows=skip)
expected_df = pd.concat([pd_read(n, skiprows=skip) for n in sorted(files)])
assert_eq(df, expected_df, check_dtype=False)
csv_blocks = [[b'aa,bb\n1,1.0\n2,2.0', b'10,20\n30,40'],
[b'aa,bb\n1,1.0\n2,2.0', b'10,20\n30,40']]
tsv_blocks = [[b'aa\tbb\n1\t1.0\n2\t2.0', b'10\t20\n30\t40'],
[b'aa\tbb\n1\t1.0\n2\t2.0', b'10\t20\n30\t40']]
@pytest.mark.parametrize('reader,blocks', [(pd.read_csv, csv_blocks),
(pd.read_table, tsv_blocks)])
def test_enforce_dtypes(reader, blocks):
head = reader(BytesIO(blocks[0][0]), header=0)
header = blocks[0][0].split(b'\n')[0] + b'\n'
dfs = text_blocks_to_pandas(reader, blocks, header, head, {},
collection=False)
dfs = dask.compute(*dfs, scheduler='sync')
assert all(df.dtypes.to_dict() == head.dtypes.to_dict() for df in dfs)
@pytest.mark.parametrize('reader,blocks', [(pd.read_csv, csv_blocks),
(pd.read_table, tsv_blocks)])
def test_enforce_columns(reader, blocks):
# Replace second header with different column name
blocks = [blocks[0], [blocks[1][0].replace(b'a', b'A'), blocks[1][1]]]
head = reader(BytesIO(blocks[0][0]), header=0)
header = blocks[0][0].split(b'\n')[0] + b'\n'
with pytest.raises(ValueError):
dfs = text_blocks_to_pandas(reader, blocks, header, head, {},
collection=False, enforce=True)
dask.compute(*dfs, scheduler='sync')
#############################
# read_csv and read_table #
#############################
@pytest.mark.parametrize('dd_read,pd_read,text,sep',
[(dd.read_csv, pd.read_csv, csv_text, ','),
(dd.read_table, pd.read_table, tsv_text, '\t'),
(dd.read_table, pd.read_table, tsv_text2, '\s+')])
def test_read_csv(dd_read, pd_read, text, sep):
with filetext(text) as fn:
f = dd_read(fn, blocksize=30, lineterminator=os.linesep, sep=sep)
assert list(f.columns) == ['name', 'amount']
# index may be different
result = f.compute(scheduler='sync').reset_index(drop=True)
assert_eq(result, pd_read(fn, sep=sep))
@pytest.mark.parametrize('dd_read,pd_read,files',
[(dd.read_csv, pd.read_csv, csv_files),
(dd.read_table, pd.read_table, tsv_files)])
def test_read_csv_files(dd_read, pd_read, files):
with filetexts(files, mode='b'):
df = dd_read('2014-01-*.csv')
assert_eq(df, expected, check_dtype=False)
fn = '2014-01-01.csv'
df = dd_read(fn)
expected2 = pd_read(BytesIO(files[fn]))
assert_eq(df, expected2, check_dtype=False)
@pytest.mark.parametrize('dd_read,pd_read,files',
[(dd.read_csv, pd.read_csv, csv_files),
(dd.read_table, pd.read_table, tsv_files)])
def test_read_csv_files_list(dd_read, pd_read, files):
with filetexts(files, mode='b'):
subset = sorted(files)[:2] # Just first 2
sol = pd.concat([pd_read(BytesIO(files[k])) for k in subset])
res = dd_read(subset)
assert_eq(res, sol, check_dtype=False)
with pytest.raises(ValueError):
dd_read([])
@pytest.mark.parametrize('dd_read,files',
[(dd.read_csv, csv_files),
(dd.read_table, tsv_files)])
def test_read_csv_include_path_column(dd_read, files):
with filetexts(files, mode='b'):
df = dd_read('2014-01-*.csv', include_path_column=True,
converters={'path': parse_filename})
filenames = df.path.compute().unique()
assert '2014-01-01.csv' in filenames
assert '2014-01-02.csv' not in filenames
assert '2014-01-03.csv' in filenames
@pytest.mark.parametrize('dd_read,files',
[(dd.read_csv, csv_files),
(dd.read_table, tsv_files)])
def test_read_csv_include_path_column_as_str(dd_read, files):
with filetexts(files, mode='b'):
df = dd_read('2014-01-*.csv', include_path_column='filename',
converters={'filename': parse_filename})
filenames = df.filename.compute().unique()
assert '2014-01-01.csv' in filenames
assert '2014-01-02.csv' not in filenames
assert '2014-01-03.csv' in filenames
@pytest.mark.parametrize('dd_read,files',
[(dd.read_csv, csv_files),
(dd.read_table, tsv_files)])
def test_read_csv_include_path_column_with_duplicate_name(dd_read, files):
with filetexts(files, mode='b'):
with pytest.raises(ValueError):
dd_read('2014-01-*.csv', include_path_column='name')
@pytest.mark.parametrize('dd_read,files',
[(dd.read_csv, csv_files),
(dd.read_table, tsv_files)])
def test_read_csv_include_path_column_is_dtype_category(dd_read, files):
with filetexts(files, mode='b'):
df = dd_read('2014-01-*.csv', include_path_column=True)
assert df.path.dtype == 'category'
assert has_known_categories(df.path)
dfs = dd_read('2014-01-*.csv', include_path_column=True, collection=False)
result = dfs[0].compute()
assert result.path.dtype == 'category'
assert has_known_categories(result.path)
# After this point, we test just using read_csv, as all functionality
# for both is implemented using the same code.
def test_read_csv_index():
with filetext(csv_text) as fn:
f = dd.read_csv(fn, blocksize=20).set_index('amount')
result = f.compute(scheduler='sync')
assert result.index.name == 'amount'
blocks = compute_as_if_collection(dd.DataFrame, f.dask,
f.__dask_keys__(),
scheduler='sync')
for i, block in enumerate(blocks):
if i < len(f.divisions) - 2:
assert (block.index < f.divisions[i + 1]).all()
if i > 0:
assert (block.index >= f.divisions[i]).all()
expected = pd.read_csv(fn).set_index('amount')
assert_eq(result, expected)
def test_usecols():
with filetext(timeseries) as fn:
df = dd.read_csv(fn, blocksize=30, usecols=['High', 'Low'])
expected = pd.read_csv(fn, usecols=['High', 'Low'])
assert (df.compute().values == expected.values).all()
def test_skipinitialspace():
text = normalize_text("""
name, amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = dd.read_csv(fn, skipinitialspace=True, blocksize=20)
assert 'amount' in df.columns
assert df.amount.max().compute() == 600
def test_consistent_dtypes():
text = normalize_text("""
name,amount
Alice,100.5
Bob,-200.5
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = dd.read_csv(fn, blocksize=30)
assert df.amount.compute().dtype == float
def test_consistent_dtypes_2():
text1 = normalize_text("""
name,amount
Alice,100
Bob,-200
Charlie,300
""")
text2 = normalize_text("""
name,amount
1,400
2,-500
Frank,600
""")
with filetexts({'foo.1.csv': text1, 'foo.2.csv': text2}):
df = dd.read_csv('foo.*.csv', blocksize=25)
assert df.name.dtype == object
assert df.name.compute().dtype == object
@pytest.mark.skipif(PANDAS_VERSION < '0.19.2',
reason="Not available in pandas <= 0.19.2")
def test_categorical_dtypes():
text1 = normalize_text("""
fruit,count
apple,10
apple,25
pear,100
orange,15
""")
text2 = normalize_text("""
fruit,count
apple,200
banana,300
orange,400
banana,10
""")
with filetexts({'foo.1.csv': text1, 'foo.2.csv': text2}):
df = dd.read_csv('foo.*.csv', dtype={'fruit': 'category'}, blocksize=25)
assert df.fruit.dtype == 'category'
assert not has_known_categories(df.fruit)
res = df.compute()
assert res.fruit.dtype == 'category'
assert (sorted(res.fruit.cat.categories) ==
['apple', 'banana', 'orange', 'pear'])
@pytest.mark.skipif(PANDAS_VERSION < '0.21.0',
reason="Uses CategoricalDtype")
def test_categorical_known():
text1 = normalize_text("""
A,B
a,a
b,b
a,a
""")
text2 = normalize_text("""
A,B
a,a
b,b
c,c
""")
dtype = pd.api.types.CategoricalDtype(['a', 'b', 'c'])
with filetexts({"foo.1.csv": text1, "foo.2.csv": text2}):
result = dd.read_csv("foo.*.csv", dtype={"A": 'category',
"B": 'category'})
assert result.A.cat.known is False
assert result.B.cat.known is False
expected = pd.DataFrame({
"A": pd.Categorical(['a', 'b', 'a', 'a', 'b', 'c'],
categories=dtype.categories),
"B": pd.Categorical(['a', 'b', 'a', 'a', 'b', 'c'],
categories=dtype.categories)},
index=[0, 1, 2, 0, 1, 2])
assert_eq(result, expected)
# Specify a dtype
result = dd.read_csv("foo.*.csv", dtype={'A': dtype, 'B': 'category'})
assert result.A.cat.known is True
assert result.B.cat.known is False
tm.assert_index_equal(result.A.cat.categories, dtype.categories)
assert result.A.cat.ordered is False
assert_eq(result, expected)
# ordered
dtype = pd.api.types.CategoricalDtype(['a', 'b', 'c'], ordered=True)
result = dd.read_csv("foo.*.csv", dtype={'A': dtype, 'B': 'category'})
expected['A'] = expected['A'].cat.as_ordered()
assert result.A.cat.known is True
assert result.B.cat.known is False
assert result.A.cat.ordered is True
assert_eq(result, expected)
# Specify "unknown" categories
result = dd.read_csv("foo.*.csv",
dtype=pd.api.types.CategoricalDtype())
assert result.A.cat.known is False
result = dd.read_csv("foo.*.csv", dtype="category")
assert result.A.cat.known is False
@pytest.mark.slow
def test_compression_multiple_files():
with tmpdir() as tdir:
f = gzip.open(os.path.join(tdir, 'a.csv.gz'), 'wb')
f.write(csv_text.encode())
f.close()
f = gzip.open(os.path.join(tdir, 'b.csv.gz'), 'wb')
f.write(csv_text.encode())
f.close()
with tm.assert_produces_warning(UserWarning):
df = dd.read_csv(os.path.join(tdir, '*.csv.gz'),
compression='gzip')
assert len(df.compute()) == (len(csv_text.split('\n')) - 1) * 2
def test_empty_csv_file():
with filetext('a,b') as fn:
df = dd.read_csv(fn, header=0)
assert len(df.compute()) == 0
assert list(df.columns) == ['a', 'b']
def test_read_csv_sensitive_to_enforce():
with filetexts(csv_files, mode='b'):
a = dd.read_csv('2014-01-*.csv', enforce=True)
b = dd.read_csv('2014-01-*.csv', enforce=False)
assert a._name != b._name
@pytest.mark.parametrize('fmt,blocksize', fmt_bs)
def test_read_csv_compression(fmt, blocksize):
files2 = valmap(compress[fmt], csv_files)
with filetexts(files2, mode='b'):
df = dd.read_csv('2014-01-*.csv', compression=fmt, blocksize=blocksize)
assert_eq(df.compute(scheduler='sync').reset_index(drop=True),
expected.reset_index(drop=True), check_dtype=False)
def test_warn_non_seekable_files():
files2 = valmap(compress['gzip'], csv_files)
with filetexts(files2, mode='b'):
with pytest.warns(UserWarning) as w:
df = dd.read_csv('2014-01-*.csv', compression='gzip')
assert df.npartitions == 3
assert len(w) == 1
msg = str(w[0].message)
assert 'gzip' in msg
assert 'blocksize=None' in msg
with pytest.warns(None) as w:
df = dd.read_csv('2014-01-*.csv', compression='gzip',
blocksize=None)
assert len(w) == 0
with pytest.raises(NotImplementedError):
with pytest.warns(UserWarning): # needed for pytest
df = dd.read_csv('2014-01-*.csv', compression='foo')
def test_windows_line_terminator():
text = 'a,b\r\n1,2\r\n2,3\r\n3,4\r\n4,5\r\n5,6\r\n6,7'
with filetext(text) as fn:
df = dd.read_csv(fn, blocksize=5, lineterminator='\r\n')
assert df.b.sum().compute() == 2 + 3 + 4 + 5 + 6 + 7
assert df.a.sum().compute() == 1 + 2 + 3 + 4 + 5 + 6
def test_header_None():
with filetexts({'.tmp.1.csv': '1,2',
'.tmp.2.csv': '',
'.tmp.3.csv': '3,4'}):
df = dd.read_csv('.tmp.*.csv', header=None)
expected = pd.DataFrame({0: [1, 3], 1: [2, 4]})
assert_eq(df.compute().reset_index(drop=True), expected)
def test_auto_blocksize():
assert isinstance(auto_blocksize(3000, 15), int)
assert auto_blocksize(3000, 3) == 100
assert auto_blocksize(5000, 2) == 250
def test_auto_blocksize_max64mb():
blocksize = auto_blocksize(1000000000000, 3)
assert blocksize == int(64e6)
assert isinstance(blocksize, int)
def test_auto_blocksize_csv(monkeypatch):
psutil = pytest.importorskip('psutil')
try:
from unittest import mock
except ImportError:
mock = pytest.importorskip('mock')
total_memory = psutil.virtual_memory().total
cpu_count = psutil.cpu_count()
mock_read_bytes = mock.Mock(wraps=read_bytes)
monkeypatch.setattr(dask.dataframe.io.csv, 'read_bytes', mock_read_bytes)
expected_block_size = auto_blocksize(total_memory, cpu_count)
with filetexts(csv_files, mode='b'):
dd.read_csv('2014-01-01.csv')
assert mock_read_bytes.called
assert mock_read_bytes.call_args[1]['blocksize'] == expected_block_size
def test_head_partial_line_fix():
files = {'.overflow1.csv': ('a,b\n'
'0,"abcdefghijklmnopqrstuvwxyz"\n'
'1,"abcdefghijklmnopqrstuvwxyz"'),
'.overflow2.csv': ('a,b\n'
'111111,-11111\n'
'222222,-22222\n'
'333333,-33333\n')}
with filetexts(files):
# 64 byte file, 52 characters is mid-quote; this should not cause exception in head-handling code.
dd.read_csv('.overflow1.csv', sample=52)
        # 35 characters cuts off before the second number on the last line
# Should sample to end of line, otherwise pandas will infer `b` to be
# a float dtype
df = dd.read_csv('.overflow2.csv', sample=35)
assert (df.dtypes == 'i8').all()
def test_read_csv_raises_on_no_files():
fn = '.not.a.real.file.csv'
try:
dd.read_csv(fn)
assert False
except (OSError, IOError) as e:
assert fn in str(e)
def test_read_csv_has_deterministic_name():
with filetext(csv_text) as fn:
a = dd.read_csv(fn)
b = dd.read_csv(fn)
assert a._name == b._name
assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)
assert isinstance(a._name, str)
c = dd.read_csv(fn, skiprows=1, na_values=[0])
assert a._name != c._name
def test_multiple_read_csv_has_deterministic_name():
with filetexts({'_foo.1.csv': csv_text, '_foo.2.csv': csv_text}):
a = dd.read_csv('_foo.*.csv')
b = dd.read_csv('_foo.*.csv')
assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)
def test_csv_with_integer_names():
with filetext('alice,1\nbob,2') as fn:
df = dd.read_csv(fn, header=None)
assert list(df.columns) == [0, 1]
@pytest.mark.slow
def test_read_csv_of_modified_file_has_different_name():
with filetext(csv_text) as fn:
sleep(1)
a = dd.read_csv(fn)
sleep(1)
with open(fn, 'a') as f:
f.write('\nGeorge,700')
os.fsync(f)
b = dd.read_csv(fn)
assert sorted(a.dask, key=str) != sorted(b.dask, key=str)
def test_late_dtypes():
text = 'numbers,names,more_numbers,integers,dates\n'
for i in range(1000):
text += '1,,2,3,2017-10-31 00:00:00\n'
text += '1.5,bar,2.5,3,4998-01-01 00:00:00\n'
date_msg = ("\n"
"\n"
"-------------------------------------------------------------\n"
"\n"
"The following columns also failed to properly parse as dates:\n"
"\n"
"- dates\n"
"\n"
"This is usually due to an invalid value in that column. To\n"
"diagnose and fix it's recommended to drop these columns from the\n"
"`parse_dates` keyword, and manually convert them to dates later\n"
"using `dd.to_datetime`.")
with filetext(text) as fn:
sol = pd.read_csv(fn)
msg = ("Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\n"
"\n"
"+--------------+---------+----------+\n"
"| Column | Found | Expected |\n"
"+--------------+---------+----------+\n"
"| more_numbers | float64 | int64 |\n"
"| names | object | float64 |\n"
"| numbers | float64 | int64 |\n"
"+--------------+---------+----------+\n"
"\n"
"- names\n"
" ValueError(.*)\n"
"\n"
"Usually this is due to dask's dtype inference failing, and\n"
"*may* be fixed by specifying dtypes manually by adding:\n"
"\n"
"dtype={'more_numbers': 'float64',\n"
" 'names': 'object',\n"
" 'numbers': 'float64'}\n"
"\n"
"to the call to `read_csv`/`read_table`.")
with pytest.raises(ValueError) as e:
dd.read_csv(fn, sample=50,
parse_dates=['dates']).compute(scheduler='sync')
assert e.match(msg + date_msg)
with pytest.raises(ValueError) as e:
dd.read_csv(fn, sample=50).compute(scheduler='sync')
assert e.match(msg)
msg = ("Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\n"
"\n"
"+--------------+---------+----------+\n"
"| Column | Found | Expected |\n"
"+--------------+---------+----------+\n"
"| more_numbers | float64 | int64 |\n"
"| numbers | float64 | int64 |\n"
"+--------------+---------+----------+\n"
"\n"
"Usually this is due to dask's dtype inference failing, and\n"
"*may* be fixed by specifying dtypes manually by adding:\n"
"\n"
"dtype={'more_numbers': 'float64',\n"
" 'numbers': 'float64'}\n"
"\n"
"to the call to `read_csv`/`read_table`.\n"
"\n"
"Alternatively, provide `assume_missing=True` to interpret\n"
"all unspecified integer columns as floats.")
with pytest.raises(ValueError) as e:
dd.read_csv(fn, sample=50,
dtype={'names': 'O'}).compute(scheduler='sync')
assert str(e.value) == msg
with pytest.raises(ValueError) as e:
dd.read_csv(fn, sample=50, parse_dates=['dates'],
dtype={'names': 'O'}).compute(scheduler='sync')
assert str(e.value) == msg + date_msg
msg = ("Mismatched dtypes found in `pd.read_csv`/`pd.read_table`.\n"
"\n"
"The following columns failed to properly parse as dates:\n"
"\n"
"- dates\n"
"\n"
"This is usually due to an invalid value in that column. To\n"
"diagnose and fix it's recommended to drop these columns from the\n"
"`parse_dates` keyword, and manually convert them to dates later\n"
"using `dd.to_datetime`.")
with pytest.raises(ValueError) as e:
dd.read_csv(fn, sample=50, parse_dates=['dates'],
dtype={'more_numbers': float, 'names': object,
'numbers': float}).compute(scheduler='sync')
assert str(e.value) == msg
# Specifying dtypes works
res = dd.read_csv(fn, sample=50,
dtype={'more_numbers': float, 'names': object,
'numbers': float})
assert_eq(res, sol)
def test_assume_missing():
text = 'numbers,names,more_numbers,integers\n'
for i in range(1000):
text += '1,foo,2,3\n'
text += '1.5,bar,2.5,3\n'
with filetext(text) as fn:
sol = pd.read_csv(fn)
# assume_missing affects all columns
res = dd.read_csv(fn, sample=50, assume_missing=True)
assert_eq(res, sol.astype({'integers': float}))
# assume_missing doesn't override specified dtypes
res = dd.read_csv(fn, sample=50, assume_missing=True,
dtype={'integers': 'int64'})
assert_eq(res, sol)
# assume_missing works with dtype=None
res = dd.read_csv(fn, sample=50, assume_missing=True, dtype=None)
assert_eq(res, sol.astype({'integers': float}))
text = 'numbers,integers\n'
for i in range(1000):
text += '1,2\n'
text += '1.5,2\n'
with filetext(text) as fn:
sol = pd.read_csv(fn)
        # assume_missing ignored when all dtypes specified
df = dd.read_csv(fn, sample=30, dtype='int64', assume_missing=True)
assert df.numbers.dtype == 'int64'
def test_index_col():
with filetext(csv_text) as fn:
try:
dd.read_csv(fn, blocksize=30, index_col='name')
assert False
except ValueError as e:
assert 'set_index' in str(e)
def test_read_csv_with_datetime_index_partitions_one():
with filetext(timeseries) as fn:
df = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
parse_dates=['Date'])
        # blocksize explicitly set to give a single chunk
ddf = dd.read_csv(fn, header=0, usecols=[0, 4],
parse_dates=['Date'],
blocksize=10000000).set_index('Date')
assert_eq(df, ddf)
# because fn is so small, by default, this will only be one chunk
ddf = dd.read_csv(fn, header=0, usecols=[0, 4],
parse_dates=['Date']).set_index('Date')
assert_eq(df, ddf)
def test_read_csv_with_datetime_index_partitions_n():
with filetext(timeseries) as fn:
df = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
parse_dates=['Date'])
# because fn is so small, by default, set chunksize small
ddf = dd.read_csv(fn, header=0, usecols=[0, 4],
parse_dates=['Date'],
blocksize=400).set_index('Date')
assert_eq(df, ddf)
@pytest.mark.parametrize('encoding', ['utf-16', 'utf-16-le', 'utf-16-be'])
def test_encoding_gh601(encoding):
ar = pd.Series(range(0, 100))
br = ar % 7
cr = br * 3.3
dr = br / 1.9836
test_df = pd.DataFrame({'a': ar, 'b': br, 'c': cr, 'd': dr})
with tmpfile('.csv') as fn:
test_df.to_csv(fn, encoding=encoding, index=False)
a = pd.read_csv(fn, encoding=encoding)
d = dd.read_csv(fn, encoding=encoding, blocksize=1000)
d = d.compute()
d.index = range(len(d.index))
assert_eq(d, a)
def test_read_csv_header_issue_823():
text = '''a b c-d\n1 2 3\n4 5 6'''.replace(' ', '\t')
with filetext(text) as fn:
df = dd.read_csv(fn, sep='\t')
assert_eq(df, pd.read_csv(fn, sep='\t'))
df = dd.read_csv(fn, delimiter='\t')
assert_eq(df, pd.read_csv(fn, delimiter='\t'))
def test_none_usecols():
with filetext(csv_text) as fn:
df = dd.read_csv(fn, usecols=None)
assert_eq(df, pd.read_csv(fn, usecols=None))
def test_parse_dates_multi_column():
pdmc_text = normalize_text("""
ID,date,time
10,2003-11-04,180036
11,2003-11-05,125640
12,2003-11-01,2519
13,2003-10-22,142559
14,2003-10-24,163113
15,2003-10-20,170133
16,2003-11-11,160448
17,2003-11-03,171759
18,2003-11-07,190928
19,2003-10-21,84623
20,2003-10-25,192207
21,2003-11-13,180156
22,2003-11-15,131037
""")
with filetext(pdmc_text) as fn:
ddf = dd.read_csv(fn, parse_dates=[['date', 'time']])
df = pd.read_csv(fn, parse_dates=[['date', 'time']])
assert (df.columns == ddf.columns).all()
assert len(df) == len(ddf)
def test_read_csv_sep():
sep_text = normalize_text("""
name###amount
alice###100
bob###200
charlie###300""")
with filetext(sep_text) as fn:
ddf = dd.read_csv(fn, sep="###", engine="python")
df = pd.read_csv(fn, sep="###", engine="python")
assert (df.columns == ddf.columns).all()
assert len(df) == len(ddf)
def test_read_csv_slash_r():
data = b'0,my\n1,data\n' * 1000 + b'2,foo\rbar'
with filetext(data, mode='wb') as fn:
dd.read_csv(fn, header=None, sep=',', lineterminator='\n',
names=['a', 'b'], blocksize=200).compute(scheduler='sync')
def test_read_csv_singleton_dtype():
data = b'a,b\n1,2\n3,4\n5,6'
with filetext(data, mode='wb') as fn:
assert_eq(pd.read_csv(fn, dtype=float),
dd.read_csv(fn, dtype=float))
def test_robust_column_mismatch():
files = csv_files.copy()
k = sorted(files)[-1]
files[k] = files[k].replace(b'name', b'Name')
with filetexts(files, mode='b'):
ddf = dd.read_csv('2014-01-*.csv')
df = pd.read_csv('2014-01-01.csv')
assert (df.columns == ddf.columns).all()
assert_eq(ddf, ddf)
def test_error_if_sample_is_too_small():
text = ('AAAAA,BBBBB,CCCCC,DDDDD,EEEEE\n'
'1,2,3,4,5\n'
'6,7,8,9,10\n'
'11,12,13,14,15')
with filetext(text) as fn:
# Sample size stops mid header row
sample = 20
with pytest.raises(ValueError):
dd.read_csv(fn, sample=sample)
# Saying no header means this is fine
assert_eq(dd.read_csv(fn, sample=sample, header=None),
pd.read_csv(fn, header=None))
skiptext = ('# skip\n'
'# these\n'
'# lines\n')
text = skiptext + text
with filetext(text) as fn:
# Sample size stops mid header row
sample = 20 + len(skiptext)
with pytest.raises(ValueError):
dd.read_csv(fn, sample=sample, skiprows=3)
# Saying no header means this is fine
assert_eq(dd.read_csv(fn, sample=sample, header=None, skiprows=3),
pd.read_csv(fn, header=None, skiprows=3))
def test_read_csv_names_not_none():
text = ('Alice,100\n'
'Bob,-200\n'
'Charlie,300\n'
'Dennis,400\n'
'Edith,-500\n'
'Frank,600\n')
names = ['name', 'amount']
with filetext(text) as fn:
ddf = dd.read_csv(fn, names=names, blocksize=16)
df = pd.read_csv(fn, names=names)
assert_eq(df, ddf, check_index=False)
############
# to_csv #
############
def test_to_csv():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]})
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpdir() as dn:
a.to_csv(dn, index=False)
result = dd.read_csv(os.path.join(dn, '*')).compute().reset_index(drop=True)
assert_eq(result, df)
with tmpdir() as dn:
r = a.to_csv(dn, index=False, compute=False)
dask.compute(*r, scheduler='sync')
result = dd.read_csv(os.path.join(dn, '*')).compute().reset_index(drop=True)
assert_eq(result, df)
with tmpdir() as dn:
fn = os.path.join(dn, 'data_*.csv')
a.to_csv(fn, index=False)
result = dd.read_csv(fn).compute().reset_index(drop=True)
assert_eq(result, df)
def test_to_csv_multiple_files_cornercases():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]})
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
with pytest.raises(ValueError):
fn = os.path.join(dn, "data_*_*.csv")
a.to_csv(fn)
df16 = pd.DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'],
'y': [1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16]})
a = dd.from_pandas(df16, 16)
with tmpdir() as dn:
fn = os.path.join(dn, 'data_*.csv')
a.to_csv(fn, index=False)
result = dd.read_csv(fn).compute().reset_index(drop=True)
assert_eq(result, df16)
# test handling existing files when links are optimized out
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
a.to_csv(dn, index=False)
fn = os.path.join(dn, 'data_*.csv')
a.to_csv(fn, mode='w', index=False)
result = dd.read_csv(fn).compute().reset_index(drop=True)
assert_eq(result, df)
# test handling existing files when links are optimized out
a = dd.from_pandas(df16, 16)
with tmpdir() as dn:
a.to_csv(dn, index=False)
fn = os.path.join(dn, 'data_*.csv')
a.to_csv(fn, mode='w', index=False)
result = dd.read_csv(fn).compute().reset_index(drop=True)
assert_eq(result, df16)
@pytest.mark.xfail(reason="to_csv does not support compression")
def test_to_csv_gzip():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn, compression='gzip')
result = pd.read_csv(fn, index_col=0, compression='gzip')
tm.assert_frame_equal(result, df)
def test_to_csv_simple():
df0 = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
df = dd.from_pandas(df0, npartitions=2)
with tmpdir() as dir:
dir = str(dir)
df.to_csv(dir)
assert os.listdir(dir)
result = dd.read_csv(os.path.join(dir, '*')).compute()
assert (result.x.values == df0.x.values).all()
def test_to_csv_series():
df0 = pd.Series(['a', 'b', 'c', 'd'], index=[1., 2., 3., 4.])
df = dd.from_pandas(df0, npartitions=2)
with tmpdir() as dir:
dir = str(dir)
df.to_csv(dir)
assert os.listdir(dir)
result = dd.read_csv(os.path.join(dir, '*'), header=None,
names=['x']).compute()
assert (result.x == df0).all()
def test_to_csv_with_get():
from dask.multiprocessing import get as mp_get
flag = [False]
def my_get(*args, **kwargs):
flag[0] = True
return mp_get(*args, **kwargs)
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
with tmpdir() as dn:
ddf.to_csv(dn, index=False, get=my_get)
assert flag[0]
result = dd.read_csv(os.path.join(dn, '*')).compute().reset_index(drop=True)
assert_eq(result, df)
def test_to_csv_paths():
df = pd.DataFrame({"A": range(10)})
ddf = dd.from_pandas(df, npartitions=2)
assert ddf.to_csv("foo*.csv") == ['foo0.csv', 'foo1.csv']
os.remove('foo0.csv')
os.remove('foo1.csv')
| gpl-3.0 |
samzhang111/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
lvwang2002/python-data-mining-platform | pymining/classifier/smo_csvc.py | 8 | 33742 | import math
import matplotlib
import matplotlib.pyplot as plt
import numpy
import os
import pickle
import psyco
psyco.full()
import sys
import time
from ..common.global_info import GlobalInfo
from ..common.configuration import Configuration
from ..math.matrix import Matrix
from ..math.text2matrix import Text2Matrix
from numpy import *
from operator import itemgetter
class Svm_Param:
'''the parameter configuration of csvc.'''
def __init__(self,config, nodeName):
try:
self.curNode = config.GetChild(nodeName)
#-------------------begin model info-------------------------------
self.modelpath = self.curNode.GetChild("model_path").GetValue()
self.logpath = self.curNode.GetChild("log_path").GetValue()
#store penalty coefficient of slack variable.
self.C = float(self.curNode.GetChild("c").GetValue())
#store a number nearly zero.
self.eps = float(self.curNode.GetChild("eps").GetValue())
#store tolerance of KKT conditions.
self.tolerance = float(self.curNode.GetChild("tolerance").GetValue())
#-------------------end model info-------------------------------
#-------------------begin times info-------------------------------
#log frequency
self.times = int(self.curNode.GetChild("times").GetValue())
#-------------------end times info-------------------------------
#-------------------begin kernel info-------------------------------
self.kernelnode = self.curNode.GetChild("kernel")
#to get kernel's type.
self.kernel_type = self.kernelnode.GetChild("name").GetValue();
            #to get parameters from top to bottom -> from left to right -> from inner to outer.
self.parameters = self.kernelnode.GetChild("parameters").GetValue().split(',')
#-------------------end kernel info-------------------------------
#to get size of cache.
self.cachesize = float(self.curNode.GetChild("cachesize").GetValue())
#matrix is dense or sparse.
self.isdense = False
except Exception as detail:
            print 'failed to read configuration file, detail:', detail
class Svm_Model:
'''the support vector machine model.'''
def __init__(self,config, nodeName):
#the configuration of svc.
self.config = Svm_Param(config, nodeName)
#the number of support vector machines.
self.svn = 0
#alpha
self.alpha = []
#the support vector.
self.sv = None
#the label of sv.
self.label = []
#the weight of model.
self.w = []
#the bias of model.
self.b = 0.0
class Svm_Util:
'''utilities of support vector machine.'''
@staticmethod
def dot( sparam, trainx, trainy, i, j):
        '''to calculate the dot product of two dense or sparse matrices.'''
if trainx == None or trainy == None:
print 'train matrix should not be empty.'
return -1
if i < 0 or j < 0:
print 'index must bigger then zero.'
return -1
try:
#isdense = True -> dense matrix,isdense = False -> sparse matrix,
isdense = sparam.isdense
#the training set or sv is sparse matrix.
if isdense == False:
if trainx.nCol <> trainy.nCol:
                    print "the dimensions of trainx and trainy must be equal."
return -1
if i >= trainx.nRow or j >= trainy.nRow:
                    print "index i or j out of range."
return -1
sum = 0.0
# to calculate dot product with O(nlgn)
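                # Both rows store their column indices in sorted order, so walk
                # the row with fewer nonzeros and binary-search each of its
                # columns in the other row; 'curlow' narrows the search window
                # after every match, keeping the product close to O(n log n).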
i1 = trainx.rows[i]
i2 = trainy.rows[j]
p1 = 0 #the elements number of row i
p2 = 0 #the elements number of row j
if i < len(trainx.rows)-1 :
p1 = trainx.rows[i+1] - trainx.rows[i]
if j < len(trainy.rows)-1 :
p2 = trainy.rows[j+1] - trainy.rows[j]
if p2 <= p1:
curlow = i1
for k in range(i2, i2+p2):
pos = Svm_Util.binary_search(trainx.cols,curlow,i1+p1-1,trainy.cols[k])
if pos != -1:
sum += trainy.vals[k] * trainx.vals[pos]
curlow = pos + 1
else:
curlow = i2
for k in range(i1, i1+p1):
pos = Svm_Util.binary_search(trainx.cols,curlow,i2+p2-1,trainx.cols[k])
if pos != -1:
sum += trainx.vals[k] * trainy.vals[pos]
curlow = pos + 1
return sum
else:
if i >= trainx.shape[0] or j >= trainy.shape[0]:
                    print "index i or j out of range."
return -1
if trainx.ndim <> trainy.ndim or trainx.shape[1] <> trainy.shape[1]:
                    print 'the dimensions of the two objects are not equal.'
return -1
return float(numpy.dot(trainx[i].tolist()[0], trainy[j].tolist()[0]))
except Exception as detail:
print 'dot product error,detail:', detail
@staticmethod
def binary_search(collist,low,high,value):
'''sorted list's binary search'''
try:
if low < 0 or high < 0 or low > high or len(collist) <= high or len(collist) <= low:
return -1
if value < collist[low] or value > collist[high]:
return -1
if value == collist[low]:
return low
if value == collist[high]:
return high
l = low
h = high
while(l<=h):
mid = (l+h)/2
if collist[mid] > value:
h = mid - 1
elif collist[mid] < value:
l = mid + 1
else:
return mid
except Exception as detail:
print 'binary_search error detail is:', detail
return -1
@staticmethod
def convert(sparam, vec):
'''To convert vector to matrix.'''
if sparam.isdense == False:
rows = [0]
cols = []
vals = []
for i in range(len(vec)):
if vec[i] <> 0:
cols.append(i)
vals.append(vec[i])
rows.append(len(cols))
return Matrix(rows, cols, vals, 1, len(vec))
else:
return matrix(vec)
@staticmethod
def RBF(sparam,trainx, trainy,xi,yi):
'''the RBF kernel.'''
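        # Gaussian kernel k(x, y) = exp(-gamma * ||x - y||**2); gamma is the first
        # kernel parameter, and the squared distance is expanded as
        # x.x + y.y - 2*x.y so it can be assembled from (sparse) dot products.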
paramlist = sparam.parameters
eta = Svm_Util.dot(sparam, trainx, trainx,xi,xi)+Svm_Util.dot(sparam, trainy, trainy,yi,yi) - 2*Svm_Util.dot(sparam, trainx, trainy,xi,yi)
res = 0.0
if eta <0:
res = math.exp(sparam.tolerance*float(paramlist[0]))
else:
res = math.exp(-eta*float(paramlist[0]))
return res
@staticmethod
def kernel_function(sparam, trainx, trainy):
'''the kernel function.'''
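        # Returns a closure k(i, j) over row i of trainx and row j of trainy:
        #   Linear:     x.y + c0
        #   RBF:        exp(-c0 * ||x - y||**2)
        #   Polynomial: (c0 * x.y + c1) ** c2
        #   Sigmoid:    tanh(c0 * x.y + c1)
        # where c0, c1, c2 are the comma-separated kernel parameters from the config.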
paramlist = sparam.parameters
kernel_type = sparam.kernel_type
if kernel_type == 'RBF':
return lambda xi,yi: Svm_Util.RBF(sparam,trainx, trainy,xi,yi)
elif kernel_type == 'Linear':
return lambda xi,yi:Svm_Util.dot(sparam, trainx, trainy,xi,yi) + float(paramlist[0])
elif kernel_type == 'Polynomial':
return lambda xi,yi: (float(paramlist[0]) * Svm_Util.dot(sparam, trainx, trainy, xi,yi) + float(paramlist[1])) ** int(paramlist[2])
elif kernel_type == 'Sigmoid':
return lambda xi,yi: math.tanh(float(paramlist[0]) * Svm_Util.dot(sparam, trainx, trainy,xi,yi) + float(paramlist[1]))
@staticmethod
def check_float_int(p,t):
        '''To check whether the value of p can be converted to a float (t = 0) or an integer (t = 1).'''
try:
if t == 0:
tmp = float(p)
elif t == 1:
tmp = int(p)
except:
tmp = ''
if (isinstance(tmp,float) and t == 0) or (isinstance(tmp,int) and t == 1):
return True
else:
return False
@staticmethod
def draw_scatter(xOffsets, yOffsets, xlabel = 'X', ylabel = 'Y', colors = None):
'''to draw draw_scatter picture.'''
if (not isinstance(xOffsets,list)) or (not isinstance(yOffsets,list)):
print 'xOffsets and yOffsets should be list type.'
return
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(0,1), ylim=(0,1))
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if colors == None:
ax.scatter(xOffsets, yOffsets)
else:
ax.scatter(xOffsets, yOffsets, c=colors, alpha=0.75)
plt.show()
file_name = 'mining/scatter_' + time.ctime() + '.png'
plt.savefig(file_name)
@staticmethod
def draw_plot(xOffsets, yOffsets, xl = 'X', yl = 'Y', title = 'figure'):
'''to draw plot picture.'''
if (not isinstance(xOffsets,list)) or (not isinstance(yOffsets,list)):
print 'xOffsets and yOffsets should be list type.'
return
fig = plt.figure()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(0,1), ylim=(0,1))
plt.xlabel(xl)
plt.ylabel(yl)
ax.plot(xOffsets, yOffsets, lw=3, color='purple')
plt.title(title)
plt.show()
file_name = 'mining/plot_' + time.ctime() + '.png'
plt.savefig(file_name)
class Smo_Csvc:
'''a support vector machine classifier using 'C' to balance empirical risk and structural risk.'''
def __init__(self,config, nodeName, loadFromFile = False, recoverFromLog = False, npRatio = 1):
'''to initialize csvc.
config: configuration file.
nodeName: xml file's node.
loadFromFile: Whether to read the csvc model from disk.
recoverFromLog: Whether to recover the training procedure from disk.
npRatio: negative samples / positive samples.
'''
self.istrained = False
#alpha
self.alpha = []
#gradient array
self.G = []
#weight
self.w = []
#bias
self.b = 0.0
#caching kii
self.kcache = {}
#initialize svm model.
self.model = Svm_Model(config, nodeName)
        #ratio of negative samples to positive samples.
self.npRatio = npRatio
#to get C1 and C2 for negative or positive samples.
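        # C1 bounds the alphas of the negative samples and C2 those of the positive
        # samples; both are derived from the configured C and the negative/positive
        # ratio so that class imbalance is reflected in the box constraints.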
if self.npRatio > 1:
self.C1 = self.model.config.C / self.npRatio
self.C2 = self.model.config.C
else:
self.C1 = self.model.config.C * self.npRatio
self.C2 = self.model.config.C
#to read model from disk.
if (loadFromFile):
try:
f = open(self.model.config.modelpath, "r")
modelStr = pickle.load(f)
self.model = pickle.loads(modelStr)
f.close()
self.istrained = True
except IOError:
pass
#to recover training from log file.
if recoverFromLog:
try:
f = open(self.model.config.logpath, "r")
modelStr = pickle.load(f)
[self.alpha,self.G,self.w,self.b,self.model] = pickle.loads(modelStr)
f.close()
except IOError:
pass
def check_config(self):
'''To check configuration file.'''
kernel = ['Linear', 'RBF', 'Polynomial', 'Sigmoid']
if self.model.config.kernel_type not in kernel:
print '~kernel type error.'
return False
if self.model.config.kernel_type == 'Linear' or self.model.config.kernel_type == 'RBF':
if len(self.model.config.parameters) != 1:
print '~Wrong number of parameters.'
return False
if not Svm_Util.check_float_int(self.model.config.parameters[0],0):
print '~Parameter type error. detail:',self.model.config.parameters[0],'should be float type.'
return False
else:
return True
if self.model.config.kernel_type == 'Polynomial':
if len(self.model.config.parameters) != 3:
print '~Wrong number of parameters.'
return False
if not (Svm_Util.check_float_int(self.model.config.parameters[0],0) and Svm_Util.check_float_int(self.model.config.parameters[1],0)):
print '~Parameter type error. detail:',self.model.config.parameters[0], ' and ',self.model.config.parameters[1],'should be float type.'
return False
elif not Svm_Util.check_float_int(self.model.config.parameters[2],1):
print '~Parameter type error. detail:',self.model.config.parameters[2], 'should be integer type.'
return False
else:
return True
if self.model.config.kernel_type == 'Sigmoid':
if len(self.model.config.parameters) != 2:
print '~Wrong number of parameters.'
return False
if not (Svm_Util.check_float_int(self.model.config.parameters[0],0) and Svm_Util.check_float_int(self.model.config.parameters[1],0)):
print '~Parameter type error. detail:',self.model.config.parameters[0], ' and ',self.model.config.parameters[1],'should be float type.'
return False
else:
return True
def GetValueFromKernelCache(self, i, i1, K, trainy):
'''To get kernel value from kernel cache.'''
key1 = '%s%s%s'%(str(i1), '-', str(i))
key2 = '%s%s%s'%(str(i), '-', str(i1))
if self.kcache.has_key(key1):
k = self.kcache[key1]
elif self.kcache.has_key(key2):
k = self.kcache[key2]
else:
k = K(i1,i)
if k < self.model.config.tolerance:
k = 0
self.kcache[key1] = k
return k
def ReduceCache(self):
'To free memory & to prevent memory leaks.'
try:
newcache = {}
if sys.getsizeof(self.kcache) > self.model.config.cachesize * (1024 **2):
for key in self.kcache.iterkeys():
kl = key.split('-')
if kl[0] == kl[1]:
newcache[key] = self.kcache[key]
self.kcache = 0
self.kcache = newcache
                print 'Freed cache memory successfully.'
except Exception as detail:
            print 'Failed to free cache memory, detail:', detail
def SelectMaximumViolatingPair(self, trainy, K):
'''To find the maximum violating pair from all samples.'''
i = -1
G_max = float("-Infinity")
obj_min = float("Infinity")
for t in range(0, len(trainy)):
if (trainy[t] == 1 and (self.C2 - self.alpha[t]) > self.model.config.tolerance ) or (trainy[t] == -1 and self.alpha[t] > 0):
if -trainy[t] * self.G[t] >= G_max:
i = t
G_max = -trainy[t] * self.G[t]
j = -1
G_min = float("Infinity")
for t in range(0, len(trainy)):
if (trainy[t] == -1 and (self.C1 - self.alpha[t]) > self.model.config.tolerance ) or (trainy[t] == 1 and self.alpha[t] > 0) :
b = G_max + trainy[t] * self.G[t]
if -trainy[t] * self.G[t] <= G_min:
G_min = -trainy[t] * self.G[t]
if b > 0:
a = 0.0
try:
a = self.GetValueFromKernelCache(i, i, K, trainy) +self.GetValueFromKernelCache(t, t, K, trainy) - 2 * self.GetValueFromKernelCache(i, t, K, trainy)
if a <= 0:
a = self.model.config.tolerance
if -(b*b)/(2*a) <= obj_min:
j = t
obj_min = -(b*b)/(2*a)
except Exception as detail:
print 'error detail is:', detail
print 'Gap = ',G_max - G_min,'Fi=',trainy[i] * self.G[i],'Fj=',trainy[j] * self.G[j]
if G_max - G_min < self.model.config.eps:
return [-1, -1, float("Infinity")]
return [i, j, obj_min]
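    # Working-set selection used above, in equations:
    #     i = argmax { -y_t * G_t : alpha_t may still increase }
    #     j = argmin { -b_t^2 / (2 a_t) : alpha_t may still decrease and b_t > 0 }
    # with b_t = G_max + y_t * G_t and a_t = K(i,i) + K(t,t) - 2 K(i,t);
    # the loop stops once the violation gap G_max - G_min drops below eps.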
def W(self,trainy, alpha1new,alpha2newclipped,i,j,K):
        '''To calculate the objective value W for candidate multipliers of the pair (i, j).'''
alpha1 = self.alpha[i]
alpha2 = self.alpha[j]
y1 = trainy[i]
y2 = trainy[j]
s = y1 * y2
k11 = self.GetValueFromKernelCache(i, i, K, trainy)
k22 = self.GetValueFromKernelCache(j, j, K, trainy)
k12 = self.GetValueFromKernelCache(i, j, K, trainy)
w1 = alpha1new * (y1 * (-y1*self.G[i]) + alpha1 * k11 + s * alpha2 * k12)
w1 += alpha2newclipped * (y2 * (-y2*self.G[j]) + alpha2 * k22 + s * alpha1 * k12)
w1 = w1 - k11 * alpha1new * alpha1new/2 - k22 * alpha2newclipped * alpha2newclipped/2 - s * k12 * alpha1new * alpha2newclipped
return w1
def calculate_auc(self,output,label):
        '''To calculate the AUC value and plot the ROC curve.'''
if output == None or label == None:
return 0.0
pos, neg = 0, 0
for i in range(len(label)):
if label[i]>0:
pos+=1
else:
neg+=1
output = sorted(output, key=itemgetter(0), reverse=True)
tprlist = []
fprlist = []
tp, fp = 0., 0.
for i in range(len(output)):
if output[i][1]>0:
tp+=1
else:
fp+=1
tprlist.append(tp/pos)
fprlist.append(fp/neg)
auc = 0.
prev_rectangular_right_vertex = 0
tpr_max = 0
fpr_max = 0
for i in range(0,len(fprlist)):
if tpr_max < tprlist[i]:
tpr_max = tprlist[i]
if fpr_max < fprlist[i]:
fpr_max = fprlist[i]
if fprlist[i] != prev_rectangular_right_vertex:
auc += (fprlist[i] - prev_rectangular_right_vertex) * tprlist[i]
prev_rectangular_right_vertex = fprlist[i]
Svm_Util.draw_plot(fprlist, tprlist, 'FPR', 'TPR', 'ROC Curve(AUC = %.4f)' % auc)
return auc
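    # Optional cross-check sketch (assumes scikit-learn is available; it is not
    # otherwise required by this module):
    #     from sklearn.metrics import roc_auc_score
    #     scores = [o[0] for o in output]
    #     labels = [o[1] for o in output]
    #     print roc_auc_score(labels, scores)   # should agree closely with auc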
def Train(self,trainx,trainy):
        '''To train the classifier.
        trainx is the training matrix and trainy is the class label vector.'''
if self.model.config.isdense == False:
if len(trainy) != trainx.nRow:
print "ERROR!, trainx.nRow should == len(y)"
return 0
else:
if trainx.shape[0] != len(trainy):
print "ERROR!, trainx.shape[0] should == trainy.shape[0]"
return 0
#to check configuration.
if not self.check_config():
return [0,0]
#to initialize all lagrange multipliers with zero.
nrow = 0
if self.model.config.isdense == True:
nrow = trainx.shape[0]
else:
nrow = trainx.nRow
ncol = 0
if self.model.config.isdense == True:
ncol = trainx.shape[1]
else:
ncol = trainx.nCol
for i in range(0,nrow):
self.alpha.append(0.0)
for i in range(0,nrow):
self.G.append(-1.0)
#to initialize w with zero.
for j in range(0,ncol):
self.w.append(float(0))
#to get kernel function.
K = Svm_Util.kernel_function(self.model.config, trainx, trainx)
#the value of objective function.
obj = 0.0
#the iterations.
iterations = 0
starttime = time.time()
while True:
begin = time.time()
#to select maximum violating pair.
[i, j, obj] = self.SelectMaximumViolatingPair(trainy, K)
if j == -1:
break
            #------------------------------------------------------------------- begin to optimize Lagrange multipliers i and j -------------------------------------------------------
L = 0.0 #the lower bound.
H = 0.0 #the upper bound
y1 = trainy[i] #sample i's label.
y2 = trainy[j] #sample j's label.
s = y1 * y2
alpha1 = self.alpha[i] #sample i's alpha value.
alpha2 = self.alpha[j] #sample j's alpha value.
#to store old alpha value of sample i and j.
oldalphai = self.alpha[i]
oldalphaj = self.alpha[j]
#the eta value.
eta = self.GetValueFromKernelCache(i, i, K, trainy) +self.GetValueFromKernelCache(j, j, K, trainy) - 2 * self.GetValueFromKernelCache(i, j, K, trainy)
#to calculate upper and lower bound.
if y1*y2 == -1:
gamma = alpha2 - alpha1
if y1 == -1:
if gamma > 0:
L = gamma
H = self.C2
else:
L = 0
H = self.C1 + gamma
else:
if gamma > 0:
L = gamma
H = self.C1
else:
L = 0
H = self.C2 + gamma
if y1*y2 == 1:
gamma = alpha2 + alpha1
if y1 == 1:
if gamma - self.C2 > 0:
L = gamma - self.C2
H = self.C2
else:
L = 0
H = gamma
else:
if gamma - self.C1 > 0:
L = gamma - self.C1
H = self.C1
else:
L = 0
H = gamma
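            # With a single C these bounds reduce to the standard SMO box constraints
            # (Platt, 1998):
            #     y1 != y2:  L = max(0, alpha2 - alpha1),      H = min(C, C + alpha2 - alpha1)
            #     y1 == y2:  L = max(0, alpha1 + alpha2 - C),  H = min(C, alpha1 + alpha2)
            # Here the class-weighted C1/C2 replace C for negative/positive samples.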
if -eta < 0:
                #to calculate alpha2's new value
alpha2new = alpha2 + y2 * (y1*self.G[i] - y2*self.G[j])/eta
if alpha2new < L:
alpha2newclipped = L
elif alpha2new > H:
alpha2newclipped = H
else:
alpha2newclipped = alpha2new
else:
w1 = self.W(trainy, alpha1 + s * (alpha2 - L),L,i,j,K)
w2 = self.W(trainy, alpha1 + s * (alpha2 - H),H,i,j,K)
if w1 - w2 > self.model.config.eps:
alpha2newclipped = L
elif w2 - w1 > self.model.config.eps:
alpha2newclipped = H
else:
alpha2newclipped = alpha2
            #to calculate alpha1's new value
alpha1new = alpha1 + s * (alpha2 - alpha2newclipped)
if alpha1new < self.model.config.tolerance:
alpha2newclipped += s * alpha1new
alpha1new = 0
elif y1 == -1 and alpha1new > self.C1:
alpha2newclipped += s * (alpha1new - self.C1)
alpha1new = self.C1
elif y1 == 1 and alpha1new > self.C2:
alpha2newclipped += s * (alpha1new - self.C2)
alpha1new = self.C2
self.alpha[i] = alpha1new
self.alpha[j] = alpha2newclipped
#to deal with Linear kernel.
if self.model.config.kernel_type == 'Linear':
ncol = 0
if self.model.config.isdense == True:
ncol = trainx.shape[1]
else:
ncol = trainx.nCol
if self.model.config.isdense == True:
self.w += (alpha1new - alpha1) * y1 * trainx[i] + (alpha2newclipped - alpha2) * y2 *trainx[j]
else:
i1 = trainx.rows[i]
i2 = trainx.rows[j]
p1 = 0 #the elements number of row i
p2 = 0 #the elements number of row j
if i < len(trainx.rows)-1 :
p1 = trainx.rows[i+1] - trainx.rows[i]
if j < len(trainx.rows)-1 :
p2 = trainx.rows[j+1] - trainx.rows[j]
                    for k in range(i1, i1+p1):
self.w[trainx.cols[k]] += (alpha1new - alpha1) * y1 * trainx.vals[k]
                    for k in range(i2, i2+p2):
self.w[trainx.cols[k]] += (alpha2newclipped - alpha2) * y2 * trainx.vals[k]
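            # Both the dense and the sparse branch apply the same incremental update
            # of the primal weight vector, valid only for the linear kernel:
            #     w <- w + (alpha1new - alpha1) * y1 * x_i + (alpha2newclipped - alpha2) * y2 * x_j
            # so w never has to be rebuilt from all the multipliers.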
            #------------------------------------------------------------------- end to optimize Lagrange multipliers i and j -------------------------------------------------------
deltaalphai = self.alpha[i] - oldalphai
deltaalphaj = self.alpha[j] - oldalphaj
#to update gradient.
for t in range(0, nrow):
try:
part1 = trainy[t] * trainy[i] * self.GetValueFromKernelCache(t, i, K, trainy) * deltaalphai
part2 = trainy[t] * trainy[j] * self.GetValueFromKernelCache(t, j, K, trainy) * deltaalphaj
self.G[t] += part1 + part2
except Exception as detail:
print 'error detail is:', detail
print 'alpha', i, '=',self.alpha[i],'alpha', j,'=', self.alpha[j], 'the objective function value =', obj
print time.time() - begin
iterations += 1
if iterations%self.model.config.times == 0:
#dump to log file.
f = open(self.model.config.logpath, "w")
log = [self.alpha,self.G,self.w,self.b,self.model]
modelStr = pickle.dumps(log,1)
pickle.dump(modelStr, f)
f.close()
self.ReduceCache()
#To store support vectors.
index = []
for i in range(0, len(self.alpha)):
if self.alpha[i] > 0:
index.append(i)
self.model.alpha.append(self.alpha[i])
self.model.label.append(trainy[i])
self.model.svn = len(index)
self.model.w = self.w
#--------------------------------------------------------
b1 = 0.0
b2 = 0.0
c1 = 0
for i in range(0,len(index) ):
if trainy[index[i]] == -1:
b1 += -trainy[index[i]] * self.G[index[i]]
c1 += 1
else:
b2 += -trainy[index[i]] * self.G[index[i]]
self.b = ((b1/c1)+(b2/(self.model.svn - c1)))/2
self.model.b = self.b
print 'the threshold value =', self.b
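        # The bias is a KKT-style estimate: -y_i * G_i is averaged separately over
        # the negative and the positive support vectors and the two means are then
        # averaged, i.e. b = (mean_{SV-}(-y G) + mean_{SV+}(-y G)) / 2.
        # Note: this assumes each class contributes at least one support vector.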
#--------------------------------------------------------
        #to store the support vectors.
if self.model.config.isdense == True:
sv = []
for i in range(0, len(index)):
sv.append(trainx[index[i]].tolist()[0])
self.model.sv = matrix(sv)
else:
rows = []
cols = []
vals = []
pos = 0
for i in range(0, len(index)):
i1 = trainx.rows[index[i]]
p1 = 0
if index[i] < len(trainx.rows)-1 :
p1 = trainx.rows[index[i]+1] - trainx.rows[index[i]]
k = 0
while(k < p1):
cols.append(trainx.cols[i1 + k])
vals.append(trainx.vals[i1 + k])
k += 1
rows.append(pos)
pos += p1
rows.append(len(vals))
self.model.sv = Matrix(rows, cols, vals ,self.model.svn, trainx.nCol )
#dump model path
f = open(self.model.config.modelpath, "w")
modelStr = pickle.dumps(self.model, 1)
pickle.dump(modelStr, f)
f.close()
self.istrained = True
try:
os.remove(self.model.config.logpath)
except:
pass
return [time.time()-starttime,iterations]
def Test(self,testx,testy):
        '''To test samples.
        testx is the test matrix and testy is the class label vector.'''
TP = 0.0
TN = 0.0
FP = 0.0
FN = 0.0
Recall = 0.0
Precision = 0.0
Accuracy = 0.0
Fbeta1 = 0.0
Fbeta2 = 0.0
AUCb = 0.0
TPR= 0.0
FPR = 0.0
pn = 0.0
nn = 0.0
tprlist = []
fprlist = []
outputlist = []
for i in range(len(testy)):
if testy[i] == 1:
pn = pn + 1
else:
nn = nn + 1
#check parameter
if (not self.istrained):
print "Error!, not trained!"
return False
K = Svm_Util.kernel_function(self.model.config, self.model.sv, testx)
nrow = 0
if self.model.config.isdense == True:
nrow = testx.shape[0]
else:
nrow = testx.nRow
for i in range(0, nrow):
fxi = 0.0
if self.model.config.kernel_type == 'Linear':
fxi = Svm_Util.dot(self.model.config, Svm_Util.convert(self.model.config, self.model.w), testx, 0, i) + self.model.b
else:
for j in range(0, self.model.svn):
fxi += self.model.alpha[j] * self.model.label[j] * K(j, i)
fxi += self.model.b
if testy[i] == 1 and fxi >=0:
TP += 1
if testy[i] == -1 and fxi <=0:
TN += 1
if testy[i] == -1 and fxi >=0:
FP += 1
if testy[i] == 1 and fxi <=0:
FN += 1
#to calculate ROC value.
TPR = TP/pn
FPR = FP/nn
tprlist.append(TPR)
fprlist.append(FPR)
outputlist.append([fxi,testy[i]])
print i,': Actual output is', fxi, 'It\'s label is', testy[i]
#to calculate auc
auc = 0.
prev_rectangular_right_vertex = 0
tpr_max = 0
fpr_max = 0
for i in range(0,len(fprlist)):
if tpr_max < tprlist[i]:
tpr_max = tprlist[i]
if fpr_max < fprlist[i]:
fpr_max = fprlist[i]
if fprlist[i] != prev_rectangular_right_vertex:
auc += (fprlist[i] - prev_rectangular_right_vertex) * tprlist[i]
prev_rectangular_right_vertex = fprlist[i]
try:
Recall = TP/(TP + FN)
Precision = TP/(TP + FP)
Accuracy = (TP + TN)/(TP + TN + FP + FN)
            Fbeta1 = 2 * (Recall * Precision)/(Precision + Recall)
            Fbeta2 = 5 * (Recall * Precision)/(4 * Precision + Recall)
AUCb = (Recall + TN/(FP + TN))/2
print 'Recall = ', Recall, 'Precision = ', Precision,'Accuracy = ', Accuracy,'\n', 'F(beta=1) = ', Fbeta1, 'F(beta=2) = ', Fbeta2, 'AUCb = ',AUCb
except Exception as detail:
print 'to test error,detail is:', detail
self.calculate_auc(outputlist,testy)
return [Recall,Precision,Accuracy,Fbeta1,Fbeta2,AUCb,auc]
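    # End-to-end usage sketch (hedged: the configuration file layout expected by
    # Svm_Model is defined elsewhere in this module and is assumed here):
    #     clf = Smo_Csvc(config, nodeName)
    #     elapsed, iterations = clf.Train(trainx, trainy)
    #     recall, precision, accuracy, f1, f2, aucb, auc = clf.Test(testx, testy)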
| bsd-3-clause |
Achuth17/scikit-learn | examples/svm/plot_svm_anova.py | 250 | 2000 | """
=================================================
SVM-Anova: SVM with univariate feature selection
=================================================
This example shows how to perform univariate feature selection before running an SVC
(support vector classifier) to improve the classification scores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
###############################################################################
# Import some data to play with
digits = datasets.load_digits()
y = digits.target
# Throw away data, to be in the curse of dimension settings
y = y[:200]
X = digits.data[:200]
n_samples = len(y)
X = X.reshape((n_samples, -1))
# add 200 non-informative features
X = np.hstack((X, 2 * np.random.random((n_samples, 200))))
###############################################################################
# Create a feature-selection transform and an instance of SVM that we
# combine together to have a full-blown estimator
transform = feature_selection.SelectPercentile(feature_selection.f_classif)
clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))])
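# Note for the loop below: pipeline step parameters are addressed with the
# '<step name>__<parameter>' convention, so
#     clf.set_params(anova__percentile=percentile)
# forwards 'percentile' to the SelectPercentile step registered as 'anova'.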
###############################################################################
# Plot the cross-validation score as a function of percentile of features
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
# Compute cross-validation score using all CPUs
this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title(
'Performance of the SVM-Anova varying the percentile of features selected')
plt.xlabel('Percentile')
plt.ylabel('Prediction rate')
plt.axis('tight')
plt.show()
| bsd-3-clause |
canavandl/bokeh | bokeh/charts/builder/donut_builder.py | 31 | 8206 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Donut class which lets you build your Donut charts just passing
the arguments to the Chart class and calling the proper functions.
It also adds a new chained stacked method.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division
from math import pi
import pandas as pd
from ..utils import cycle_colors, polar_to_cartesian
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import AnnularWedge, Text, Wedge
from ...properties import Any, Bool, Either, List
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Donut(values, cat=None, width=800, height=800, xgrid=False, ygrid=False, **kws):
""" Creates a Donut chart using :class:`DonutBuilder <bokeh.charts.builder.donut_builder.DonutBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
cat (list or bool, optional): list of string representing the categories.
Defaults to None.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.charts import Donut, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = [[2., 5., 3.], [4., 1., 4.], [6., 4., 3.]]
donut = Donut(xyvalues, ['cpu1', 'cpu2', 'cpu3'])
output_file('donut.html')
show(donut)
"""
return create_and_build(
DonutBuilder, values, cat=cat, width=width, height=height,
xgrid=xgrid, ygrid=ygrid, **kws
)
class DonutBuilder(Builder):
"""This is the Donut class and it is in charge of plotting
Donut chart in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the donut slices and angles.
And finally add the needed glyphs (Wedges and AnnularWedges) taking
the references from the source.
"""
cat = Either(Bool, List(Any), help="""
List of string representing the categories. (Defaults to None.)
""")
def _process_data(self):
"""Take the chart data from self._values.
It calculates the chart properties accordingly (start/end angles).
Then build a dict containing references to all the calculated
points to be used by the Wedge glyph inside the ``_yield_renderers`` method.
"""
dd = dict(zip(self._values.keys(), self._values.values()))
self._df = df = pd.DataFrame(dd)
self._groups = df.index = self.cat
df.columns = self._values.keys()
# Get the sum per category
aggregated = df.T.sum()
# Get the total (sum of all categories)
self._total_units = total = aggregated.sum()
radians = lambda x: 2*pi*(x/total)
angles = aggregated.map(radians).cumsum()
end_angles = angles.tolist()
start_angles = [0] + end_angles[:-1]
colors = cycle_colors(self.cat, self.palette)
self.set_and_get("", "colors", colors)
self.set_and_get("", "end", end_angles)
self.set_and_get("", "start", start_angles)
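        # The slice geometry computed above: each category spans
        # 2*pi * (category_sum / total) radians, end_angles is the cumulative sum
        # of those spans and start_angles is the same sequence shifted by one slot.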
def _set_sources(self):
"""Push the Donut data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = Range1d(start=-2, end=2)
self.y_range = Range1d(start=-2, end=2)
def draw_central_wedge(self):
"""Draw the central part of the donut wedge from donut.source and
its calculated start and end angles.
"""
glyph = Wedge(
x=0, y=0, radius=1, start_angle="start", end_angle="end",
line_color="white", line_width=2, fill_color="colors"
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
def draw_central_descriptions(self):
"""Draw the descriptions to be placed on the central part of the
donut wedge
"""
text = ["%s" % cat for cat in self.cat]
x, y = polar_to_cartesian(0.7, self._data["start"], self._data["end"])
text_source = ColumnDataSource(dict(text=text, x=x, y=y))
glyph = Text(
x="x", y="y", text="text",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def draw_external_ring(self, colors=None):
"""Draw the external part of the donut wedge from donut.source
and its related descriptions
"""
if colors is None:
colors = cycle_colors(self.cat, self.palette)
first = True
for i, (cat, start_angle, end_angle) in enumerate(zip(
self.cat, self._data['start'], self._data['end'])):
details = self._df.ix[i]
radians = lambda x: 2*pi*(x/self._total_units)
angles = details.map(radians).cumsum() + start_angle
end = angles.tolist() + [end_angle]
start = [start_angle] + end[:-1]
base_color = colors[i]
#fill = [ base_color.lighten(i*0.05) for i in range(len(details) + 1) ]
fill = [base_color for i in range(len(details) + 1)]
text = [rowlabel for rowlabel in details.index]
x, y = polar_to_cartesian(1.25, start, end)
source = ColumnDataSource(dict(start=start, end=end, fill=fill))
glyph = AnnularWedge(
x=0, y=0, inner_radius=1, outer_radius=1.5,
start_angle="start", end_angle="end",
line_color="white", line_width=2,
fill_color="fill"
)
yield GlyphRenderer(data_source=source, glyph=glyph)
text_angle = [(start[i]+end[i])/2 for i in range(len(start))]
text_angle = [angle + pi if pi/2 < angle < 3*pi/2 else angle
for angle in text_angle]
if first and text:
text.insert(0, '')
offset = pi / 48
text_angle.insert(0, text_angle[0] - offset)
start.insert(0, start[0] - offset)
end.insert(0, end[0] - offset)
x, y = polar_to_cartesian(1.25, start, end)
first = False
data = dict(text=text, x=x, y=y, angle=text_angle)
text_source = ColumnDataSource(data)
glyph = Text(
x="x", y="y", text="text", angle="angle",
text_align="center", text_baseline="middle"
)
yield GlyphRenderer(data_source=text_source, glyph=glyph)
def _yield_renderers(self):
"""Use the AnnularWedge and Wedge glyphs to display the wedges.
Takes reference points from data loaded at the ColumnDataSurce.
"""
# build the central round area of the donut
renderers = []
renderers += self.draw_central_wedge()
# write central descriptions
renderers += self.draw_central_descriptions()
# build external donut ring
renderers += self.draw_external_ring()
return renderers
| bsd-3-clause |
hsiaoyi0504/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
Josue-Martinez-Moreno/trackeddy | examples/random_field.py | 1 | 8259 | import time
tic=time.time()
import matplotlib
matplotlib.use('Agg')
import trackeddy
import trackeddy.tracking as ttrack
from trackeddy.geometryfunc import *
from pylab import *
import random
import pdb
import cmocean as cm
import matplotlib.gridspec as gridspec
import trackeddy.utils.field_generator as fg
import importlib
importlib.reload(ttrack)
t = 1000
n = 13
xx = linspace(10,12,200)
yy = linspace(10,12,200)
#print("Generate field")
#gf=fg.Generate_field(0.1,0.1,n,xx,yy,'Nint')
#data = gf.assemble_field(t)
data = zeros((t,300,300))
for tt in range(t):
print(tt)
gf=fg.Generate_field(0.1,0.1,randint(5, 15),xx,yy,'Nint')
data[tt,:,:] = gf.assemble_field(1)
##
x = linspace(10,12,300)
y = linspace(10,12,300)
################################################################################
################################################################################
#################################### FLAT ######################################
################################################################################
################################################################################
preferences={'ellipse':0.85,'eccentricity':0.85,'gaussian':0.8}
eddytd={}
eddytdn={}
t0 = 0
t = 1000
levels = {'max':data.max(),'min':0.05,'step':0.05}
eddytd = trackeddy.tracking.analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
####
levels = {'max':data.min(),'min':-0.05,'step':-0.05}
eddytdn = trackeddy.tracking.analyseddyzt(data,x,y,t0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True,debug=False)
pos_f = reconstruct_syntetic(shape(data),x,y,eddytd)
neg_f = reconstruct_syntetic(shape(data),x,y,eddytdn)
f_field = pos_f+neg_f
for tt in range(t0,t):
f = plt.figure()
gs = gridspec.GridSpec(2, 1)
ax1 = plt.subplot(gs[0])
ax1.pcolormesh(x,y,data[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
ax2 = plt.subplot(gs[1])
ax2.pcolormesh(f_field[tt,:,:],vmin=-1,vmax=1,cmap=cm.cm.balance)
ax2.contour(f_field[tt,:,:])
    ax1.set_title('Assemble: %03d' % tt)
plt.savefig('time_%03d.png' %tt)
################################################################################
################################################################################
#################################### WAVE ######################################
################################################################################
################################################################################
amplitude = 1
frequency = 20
phase = 1
waves = zeros(shape(data))
X,Y = meshgrid(x,y)
for tt in range(0,t):
    r = X+y/10
    waves[tt,:,:] = 0.3*sin(r*frequency-tt + phase)
wave_data = waves+data
levels = {'max':data.max(),'min':0.05,'step':0.05}
eddytd=ttrack.analyseddyzt(wave_data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
levels = {'max':data.min(),'min':-0.05,'step':-0.05}
eddytdn=ttrack.analyseddyzt(wave_data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
pos_w = reconstruct_syntetic(shape(data),x,y,eddytd)
neg_w = reconstruct_syntetic(shape(data),x,y,eddytdn)
w_field = pos_w+neg_w
################################################################################
################################################################################
#################################### JETS ######################################
################################################################################
################################################################################
k_y = 3
phase = 1
k_x = 2
jets = zeros(shape(data))
for tt in range(0,t):
    r = Y
    k_y=random.uniform(2, 3)
    phase=random.uniform(0, 1)
    k_x=random.uniform(1, 2)
    amp=0.3
    jets[tt,:,:] = amp*cos((k_y*(k_y*Y+phase+sin(k_x*X-tt))))
jet_data = jets+data
levels = {'max':data.max(),'min':0.05,'step':0.05}
eddytd=ttrack.analyseddyzt(jet_data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
levels = {'max':data.min(),'min':-0.05,'step':-0.05}
eddytdn=ttrack.analyseddyzt(jet_data,x,y,0,t,1,levels,preferences=preferences,areamap='',mask='',maskopt='forcefit'\
,destdir='',physics='',diagnostics=False,plotdata=False,pprint=True)
pos_f = reconstruct_syntetic(shape(data),x,y,eddytd)
neg_f = reconstruct_syntetic(shape(data),x,y,eddytdn)
j_field = pos_f+neg_f
################################################################################
################################################################################
##################################### KE #######################################
################################################################################
################################################################################
m_ke_c = []
m_ke_f = []
m_ke_w = []
m_ke_j = []
for tt in range(shape(data)[0]):
u_c,v_c = geovelfield( data[tt,:,:] ,x,y)
u_f,v_f = geovelfield(f_field[tt,:,:],x,y)
u_w,v_w = geovelfield(w_field[tt,:,:],x,y)
u_j,v_j = geovelfield(j_field[tt,:,:],x,y)
ke_c = KE(u_c,v_c)
ke_f = KE(u_f,v_f)
ke_w = KE(u_w,v_w)
ke_j = KE(u_j,v_j)
m_ke_c.append(mean(ke_c))
m_ke_f.append(mean(ke_f))
m_ke_w.append(mean(ke_w))
m_ke_j.append(mean(ke_j))
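# Note: geovelfield and KE presumably come from the trackeddy.geometryfunc star
# import above. The plots below compare the mean kinetic energy of the original
# eddy field (m_ke_c) against the energy of the fields reconstructed from the
# tracked eddies in the flat, wave-contaminated and jet-contaminated cases.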
################################################################################
################################################################################
#################################### PLOT ######################################
################################################################################
################################################################################
import seaborn as sns
import pandas as pd
from scipy.stats import spearmanr,linregress
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_f]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
s,i,r,p,std=linregress(m_ke_c,m_ke_f)
x0=0
y0=s*x0+i
x1=100
y1=s*x1+i
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate flat: ',mean([abs(y0/100),abs(1-y1/100)]))
plt.savefig('e_vs_e.png')
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_w]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
s,i,r,p,std=linregress(m_ke_c,m_ke_w)
x0=0
y0=s*x0+i
x1=100
y1=s*x1+i
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate sin: ',mean([abs(y0/100),abs(1-y1/100)]))
plt.savefig('w_vs_e.png')
figure(dpi=300)
data=np.vstack([m_ke_c,m_ke_j]).T
df = pd.DataFrame(data, columns=[r"$KE_c$", r"$KE_r$"])
g1 = sns.jointplot(x=r"$KE_c$", y=r"$KE_r$", data=df, kind="kde",cmap='Blues',joint_kws={'shade_lowest':False})
lims = [100, 0]
g1.ax_joint.plot(lims, lims, '--k')
s,i,r,p,std=linregress(m_ke_c,m_ke_j)
x0=0
y0=s*x0+i
x1=100
y1=s*x1+i
g1.ax_joint.plot([x0,x1], [y0,y1], '-.b')
g1.ax_joint.text(60,20,r'R = %2f' % r, color='b')
g1.ax_marg_x.set_xlim(0,100)
g1.ax_marg_y.set_ylim(0,100)
print('estimate jet: ',mean([abs(y0/100),abs(1-y1/100)]))
plt.savefig('j_vs_e.png')
# for ii in range(0,30):
# plt.figure()
# plt.pcolormesh(af[ii])
# plt.savefig('%03d.png' %ii)
# plt.show()
toc=time.time()
print("######## ELAPSED TIME: ###########")
print("######## %2f s ###########" % (toc-tic)) | mit |
stefanbuenten/nanodegree | p5/tools/startup.py | 9 | 1161 | #!/usr/bin/python
print
print "checking for nltk"
try:
import nltk
except ImportError:
print "you should install nltk before continuing"
print "checking for numpy"
try:
import numpy
except ImportError:
print "you should install numpy before continuing"
print "checking for scipy"
try:
import scipy
except:
print "you should install scipy before continuing"
print "checking for sklearn"
try:
import sklearn
except:
print "you should install sklearn before continuing"
print
print "downloading the Enron dataset (this may take a while)"
print "to check on progress, you can cd up one level, then execute <ls -lthr>"
print "Enron dataset should be last item on the list, along with its current size"
print "download will complete at about 423 MB"
import urllib
url = "https://www.cs.cmu.edu/~./enron/enron_mail_20150507.tgz"
urllib.urlretrieve(url, filename="../enron_mail_20150507.tgz")
print "download complete!"
print
print "unzipping Enron dataset (this may take a while)"
import tarfile
import os
os.chdir("..")
tfile = tarfile.open("enron_mail_20150507.tgz", "r:gz")
tfile.extractall(".")
print "you're ready to go!"
| mit |
mjgrav2001/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
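    # Example: for tokens ['please', 'do', 'not'] and ngram_range=(1, 2) this
    # returns ['please', 'do', 'not', 'please do', 'do not'].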
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
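    # Example: with ngram_range=(3, 3) the document 'fox' is padded to ' fox '
    # and yields [' fo', 'fox', 'ox ']; n-grams never cross a word boundary.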
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
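    # Usage sketch:
    #     vec = HashingVectorizer(n_features=2 ** 10)
    #     X = vec.transform(['first document', 'second document'])
    #     # X is a 2 x 1024 sparse matrix; the transformer is stateless, so no
    #     # prior fit on a corpus is required.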
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp select tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non-zero in more documents than high or in
fewer documents than low, modifying the vocabulary, and restricting it
to at most the limit most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
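# each previously unseen token is assigned the next free column index,
# i.e. the current length of the mapping at the time it is first seen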
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
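# float thresholds are interpreted as proportions of the corpus and
# converted to absolute document counts below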
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
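# order the terms by their column index so that position i holds the
# term mapped to feature i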
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
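For illustration, a minimal sketch of the idf formula above (the count
matrix is arbitrary example data; output shown approximately):
>>> import numpy as np
>>> counts = np.array([[3, 0], [2, 0], [3, 1], [4, 1]])
>>> transformer = TfidfTransformer(smooth_idf=False, norm=None)
>>> transformer.fit(counts).idf_                    # doctest: +SKIP
array([ 1.        ,  1.69314718])
The first term occurs in all four documents, so its idf is
log(4/4) + 1 = 1 and it is down-weighted but not discarded; the second
occurs in two of four documents, giving log(4/2) + 1 ~= 1.693.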
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
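# adding 1 to both df and n_samples mimics an extra document that
# contains every term exactly once (see smooth_idf)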
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
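# when sublinear_tf is set, replace each stored count tf with
# 1 + log(tf), computed in place on the sparse data array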
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents; if an
integer, absolute document counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents; if an
integer, absolute document counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of tokens and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
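For illustration, a minimal usage sketch (the corpus strings are
arbitrary example data):
>>> corpus = ["the cat sat", "the dog sat", "the dog barked"]
>>> vectorizer = TfidfVectorizer()
>>> X = vectorizer.fit_transform(corpus)
>>> X.shape
(3, 5)
Each row is an l2-normalized tf-idf vector over the five distinct tokens.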
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
tbs1980/densplot | densplot.py | 1 | 6416 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import logging
import math
import matplotlib.ticker as mtick
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from scipy.ndimage import gaussian_filter
def weighted_avg_and_std(values, weights):
"""
Return the weighted average and standard deviation.
values, weights -- Numpy ndarrays with the same shape.
http://stackoverflow.com/questions/2413522/weighted-standard-deviation-in-numpy
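Illustrative sketch (output shown approximately):
>>> weighted_avg_and_std(np.array([1., 2., 3., 4.]),
...                      np.array([1., 1., 1., 1.]))   # doctest: +SKIP
(2.5, 1.118033988749895)
With equal weights this reduces to the ordinary mean and the population
standard deviation sqrt(1.25).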
"""
average = np.average(values, weights=weights)
variance = np.average((values-average)**2, weights=weights) # Fast and numerically precise
return (average, math.sqrt(variance))
def densplot(samples,weights):
"""
A function to plot the densities from MCMC chains.
@param samples mcmc samples
@param weights weights for samples
"""
# sanity checks
if samples.shape[0] != weights.shape[0] :
raise ValueError("Number of rows in samples and weights should be equal")
if samples.shape[0] < samples.shape[1] :
raise ValueError("We require more samples than number of parameters")
# define the dimensions
num_params = samples.shape[1]
majorFormatter = FormatStrFormatter('%4.2e')
K = num_params
factor = 1.5 # size of one side of one panel
lbdim = 1.5 * factor # size of left/bottom margin
trdim = 1.5 * factor # size of top/right margin
whspace = 0.05 # w/hspace size
plotdim = factor * K + factor * (K - 1.) * whspace
dim = lbdim + plotdim + trdim
# get the subplots
fig, axes = plt.subplots(num_params,num_params,sharex=False, sharey=False,
squeeze=False,figsize=(dim, dim))
fig.subplots_adjust(wspace=0., hspace=0.)
for ent_i in range(num_params):
for ent_j in range(num_params):
ax = axes[ent_i,ent_j]
ax.tick_params(axis='both',which='both',bottom='off', top='off',
right='off',left='off', labelbottom='off', labeltop='off',
labelleft='off',labelright='off')
# delete all the upper triangle
if ent_j > ent_i :
fig.delaxes(ax)
# histogram of each parameter
if ent_j == ent_i :
mu,sig = weighted_avg_and_std(samples[:,ent_j],np.exp(weights))
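# restrict the histogram to the weighted mean +/- 4 weighted standard deviations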
rmin = mu - 4.*sig
rmax = mu + 4.*sig
counts,bins,patchs = ax.hist(samples[:,ent_j],histtype='step',
weights=np.exp(weights),normed=True,color='black',
bins=20,range=(rmin,rmax))
counts_smooth = gaussian_filter(counts,sigma=2)
counts_smooth_sorted_flat = sorted(counts_smooth.flatten(),reverse=True)
hist_axes = ax.axis()
ax.plot([mu+sig,mu+sig],[hist_axes[2],hist_axes[3]],color='r')
ax.plot([mu-sig,mu-sig],[hist_axes[2],hist_axes[3]],color='r')
# 2-d histogram for cross
if ent_j < ent_i :
mu_i,sig_i = weighted_avg_and_std(samples[:,ent_i],np.exp(weights))
mu_j,sig_j = weighted_avg_and_std(samples[:,ent_j],np.exp(weights))
rmin_i = mu_i - 4.*sig_i
rmax_i = mu_i + 4.*sig_i
rmin_j = mu_j - 4.*sig_j
rmax_j = mu_j + 4.*sig_j
hist_range = ([rmin_i,rmax_i],[rmin_j,rmax_j])
counts,ybins,xbins,image = ax.hist2d(samples[:,ent_i],samples[:,ent_j],weights=np.exp(weights),
normed=True,bins=40,range=hist_range,cmap='Greys')
counts_smooth = gaussian_filter(counts,sigma=2)
counts_smooth_sorted_flat = sorted(counts_smooth.flatten(),reverse=True)
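# accumulate the sorted, smoothed bin counts until the 1-sigma fraction
# (~68.27%) of the total mass is enclosed; the bin count reached is then
# used as the contour level drawn below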
total = np.sum(counts_smooth_sorted_flat)
one_sigma = total*0.68268949
sum_counts = 0.
level_1 = 0.
for i in range(len(counts_smooth_sorted_flat)):
sum_counts += counts_smooth_sorted_flat[i]
if sum_counts >= one_sigma:
level_1 = counts_smooth_sorted_flat[i]
break
ax.contour(counts_smooth,extent=[rmin_i,rmax_i,rmin_j,rmax_j],
levels=[level_1],colors=['red'])
"""
two_sigma = total*0.95449974
sum_counts = 0.
level_2 = 0.
for i in range(len(counts_smooth_sorted_flat)):
sum_counts += counts_smooth_sorted_flat[i]
if sum_counts >= two_sigma:
level_2 = counts_smooth_sorted_flat[i]
break
ax.contour(counts_smooth,extent=[rmin_i,rmax_i,rmin_j,rmax_j],
levels=[level_1,level_2],colors=['red','blue'])
"""
# axis ticks and labels
if ent_i == num_params - 1:
ax.tick_params(axis='x',which='major',direction='in',
bottom='on', top='off',right='off',left='off',
labelbottom='on', labeltop='off',labelleft='off',labelright='off')
mu_i,sig_i = weighted_avg_and_std(samples[:,ent_i],np.exp(weights))
rmin_i = mu_i - 4.*sig_i
rmax_i = mu_i + 4.*sig_i
x_ticks = np.linspace(rmin_i,rmax_i,4)
ax.set_xticks(x_ticks[1:3])
ax.xaxis.set_major_formatter(majorFormatter)
[l.set_rotation(45) for l in ax.get_xticklabels()]
if ent_j == 0 and ent_j != ent_i:
ax.tick_params(axis='y',which='major',direction='in',
bottom='off', top='off',right='off',left='on',
labelbottom='off', labeltop='off',labelleft='on',labelright='off')
mu_j,sig_j = weighted_avg_and_std(samples[:,ent_j],np.exp(weights))
rmin_j = mu_j - 4.*sig_j
rmax_j = mu_j + 4.*sig_j
y_ticks = np.linspace(rmin_j,rmax_j,4)
ax.set_yticks(y_ticks[1:3])
ax.xaxis.set_major_formatter(majorFormatter)
ax.yaxis.set_major_formatter(majorFormatter)
[l.set_rotation(45) for l in ax.get_xticklabels()]
[l.set_rotation(45) for l in ax.get_yticklabels()]
return axes
| apache-2.0 |
makinacorpus/formhub | odk_viewer/tests/test_exports.py | 1 | 87463 | from sys import stdout
import os
import datetime
import json
import StringIO
import csv
import tempfile
import zipfile
import shutil
from openpyxl import load_workbook
from time import sleep
from pyxform.builder import create_survey_from_xls
from django.conf import settings
from main.tests.test_base import MainTestCase
from django.utils.dateparse import parse_datetime
from django.core.urlresolvers import reverse
from django.core.files.temp import NamedTemporaryFile
from odk_viewer.xls_writer import XlsWriter
from odk_viewer.views import delete_export, export_list, create_export,\
export_progress, export_download
from pyxform import SurveyElementBuilder
from odk_viewer.models import Export, ParsedInstance
from utils.export_tools import generate_export, increment_index_in_filename,\
dict_to_joined_export, ExportBuilder
from odk_logger.models import Instance, XForm
from main.views import delete_data
from utils.logger_tools import inject_instanceid
from django.core.files.storage import get_storage_class
from odk_viewer.pandas_mongo_bridge import NoRecordsFoundError
from odk_viewer.tasks import create_xls_export
from xlrd import open_workbook
from odk_viewer.models.parsed_instance import _encode_for_mongo
from odk_logger.xform_instance_parser import XFormInstanceParser
class TestExportList(MainTestCase):
def setUp(self):
super(TestExportList, self).setUp()
self._publish_transportation_form()
survey = self.surveys[0]
self._make_submission(
os.path.join(
self.this_directory, 'fixtures', 'transportation',
'instances', survey, survey + '.xml'))
def test_csv_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.CSV_EXPORT}
# test csv
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_xls_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_kml_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.KML_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_zip_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.ZIP_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_gdoc_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.GDOC_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_csv_zip_export_list(self):
kwargs = {'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.CSV_ZIP_EXPORT}
url = reverse(export_list, kwargs=kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class TestDataExportURL(MainTestCase):
def setUp(self):
super(TestDataExportURL, self).setUp()
self._publish_transportation_form()
def _filename_from_disposition(self, content_disposition):
filename_pos = content_disposition.index('filename=')
self.assertTrue(filename_pos != -1)
return content_disposition[filename_pos + len('filename='):]
def test_csv_export_url(self):
self._submit_transport_instance()
url = reverse('csv_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'], 'application/csv')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.csv')
def test_csv_export_url_without_records(self):
# csv using the pandas path can throw a NoRecordsFound Exception -
# handle it gracefully
url = reverse('csv_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_xls_export_url(self):
self._submit_transport_instance()
url = reverse('xls_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'],
'application/vnd.openxmlformats')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.xlsx')
def test_csv_zip_export_url(self):
self._submit_transport_instance()
url = reverse('csv_zip_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
})
response = self.client.get(url)
headers = dict(response.items())
self.assertEqual(headers['Content-Type'], 'application/zip')
content_disposition = headers['Content-Disposition']
filename = self._filename_from_disposition(content_disposition)
basename, ext = os.path.splitext(filename)
self.assertEqual(ext, '.zip')
class TestExports(MainTestCase):
def setUp(self):
super(TestExports, self).setUp()
self._submission_time = parse_datetime('2013-02-18 15:54:01Z')
def test_unique_xls_sheet_name(self):
xls_writer = XlsWriter()
xls_writer.add_sheet('section9_pit_latrine_with_slab_group')
xls_writer.add_sheet('section9_pit_latrine_without_slab_group')
# create a set of sheet names keys
sheet_names_set = set(xls_writer._sheets.keys())
self.assertEqual(len(sheet_names_set), 2)
def test_csv_http_response(self):
self._publish_transportation_form()
survey = self.surveys[0]
self._make_submission(
os.path.join(
self.this_directory, 'fixtures', 'transportation',
'instances', survey, survey + '.xml'),
forced_submission_time=self._submission_time)
response = self.client.get(reverse('csv_export',
kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
test_file_path = os.path.join(os.path.dirname(__file__),
'fixtures', 'transportation.csv')
content = self._get_response_content(response)
with open(test_file_path, 'r') as test_file:
self.assertEqual(content, test_file.read())
def test_responses_for_empty_exports(self):
self._publish_transportation_form()
# test csv though xls uses the same view
url = reverse('csv_export',
kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}
)
self.response = self.client.get(url)
self.assertEqual(self.response.status_code, 404)
self.assertIn('text/html', self.response['content-type'])
def test_create_export(self):
self._publish_transportation_form_and_submit_instance()
storage = get_storage_class()()
# test xls
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
self.assertTrue(storage.exists(export.filepath))
path, ext = os.path.splitext(export.filename)
self.assertEqual(ext, '.xls')
# test csv
export = generate_export(Export.CSV_EXPORT, 'csv', self.user.username,
self.xform.id_string)
self.assertTrue(storage.exists(export.filepath))
path, ext = os.path.splitext(export.filename)
self.assertEqual(ext, '.csv')
# test xls with existing export_id
existing_export = Export.objects.create(xform=self.xform,
export_type=Export.XLS_EXPORT)
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string, existing_export.id)
self.assertEqual(existing_export.id, export.id)
def test_delete_file_on_export_delete(self):
self._publish_transportation_form()
self._submit_transport_instance()
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
storage = get_storage_class()()
self.assertTrue(storage.exists(export.filepath))
# delete export object
export.delete()
self.assertFalse(storage.exists(export.filepath))
def test_graceful_exit_on_export_delete_if_file_doesnt_exist(self):
self._publish_transportation_form()
self._submit_transport_instance()
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
storage = get_storage_class()()
# delete file
storage.delete(export.filepath)
self.assertFalse(storage.exists(export.filepath))
# clear filename, like it would be in an incomplete export
export.filename = None
export.filedir = None
export.save()
# delete export record, which should try to delete file as well
delete_url = reverse(delete_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
post_data = {'export_id': export.id}
response = self.client.post(delete_url, post_data)
self.assertEqual(response.status_code, 302)
def test_delete_oldest_export_on_limit(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create first export
first_export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
self.assertIsNotNone(first_export.pk)
# create exports that exceed set limit
for i in range(Export.MAX_EXPORTS):
generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
# first export should be deleted
exports = Export.objects.filter(id=first_export.id)
self.assertEqual(len(exports), 0)
def test_create_export_url(self):
self._publish_transportation_form()
self._submit_transport_instance()
num_exports = Export.objects.count()
# create export
create_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT
})
response = self.client.post(create_export_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(Export.objects.count(), num_exports + 1)
def test_delete_export_url(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create export
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
exports = Export.objects.filter(id=export.id)
self.assertEqual(len(exports), 1)
delete_url = reverse(delete_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
post_data = {'export_id': export.id}
response = self.client.post(delete_url, post_data)
self.assertEqual(response.status_code, 302)
exports = Export.objects.filter(id=export.id)
self.assertEqual(len(exports), 0)
def test_export_progress_output(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create exports
for i in range(2):
generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
self.assertEqual(Export.objects.count(), 2)
# progress for multiple exports
progress_url = reverse(export_progress, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
get_data = {'export_ids': [e.id for e in Export.objects.all()]}
response = self.client.get(progress_url, get_data)
content = json.loads(response.content)
self.assertEqual(len(content), 2)
self.assertEqual(sorted(['url', 'export_id', 'complete', 'filename']),
sorted(content[0].keys()))
def test_auto_export_if_none_exists(self):
self._publish_transportation_form()
self._submit_transport_instance()
# get export list url
num_exports = Export.objects.count()
export_list_url = reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT
})
response = self.client.get(export_list_url)
self.assertEqual(Export.objects.count(), num_exports + 1)
def test_dont_auto_export_if_exports_exist(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create export
create_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT
})
response = self.client.post(create_export_url)
num_exports = Export.objects.count()
export_list_url = reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT
})
response = self.client.get(export_list_url)
self.assertEqual(Export.objects.count(), num_exports)
def test_last_submission_time_on_export(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create export
xls_export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
num_exports = Export.objects.filter(xform=self.xform,
export_type=Export.XLS_EXPORT).count()
# check that our function knows there are no more submissions
self.assertFalse(Export.exports_outdated(xform=self.xform,
export_type=Export.XLS_EXPORT))
# force new last submission date on xform
last_submission = self.xform.surveys.order_by('-date_created')[0]
last_submission.date_created += datetime.timedelta(hours=1)
last_submission.save()
# check that our function knows data has changed
self.assertTrue(Export.exports_outdated(xform=self.xform,
export_type=Export.XLS_EXPORT))
# check that requesting list url will generate a new export
export_list_url = reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT
})
response = self.client.get(export_list_url)
self.assertEqual(Export.objects.filter(xform=self.xform,
export_type=Export.XLS_EXPORT).count(), num_exports + 1)
# make sure another export type causes auto-generation
num_exports = Export.objects.filter(xform=self.xform,
export_type=Export.CSV_EXPORT).count()
export_list_url = reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.CSV_EXPORT
})
response = self.client.get(export_list_url)
self.assertEqual(Export.objects.filter(xform=self.xform,
export_type=Export.CSV_EXPORT).count(), num_exports + 1)
def test_last_submission_time_empty(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create export
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
# set time of last submission to None
export.time_of_last_submission = None
export.save()
self.assertTrue(Export.exports_outdated(xform=self.xform,
export_type=Export.XLS_EXPORT))
def test_invalid_export_type(self):
self._publish_transportation_form()
self._submit_transport_instance()
export_list_url = reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'invalid'
})
response = self.client.get(export_list_url)
self.assertEqual(response.status_code, 400)
# test create url
create_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'invalid'
})
response = self.client.post(create_export_url)
self.assertEqual(response.status_code, 400)
def test_add_index_to_filename(self):
filename = "file_name-123f.txt"
new_filename = increment_index_in_filename(filename)
expected_filename = "file_name-123f-1.txt"
self.assertEqual(new_filename, expected_filename)
# test file that already has an index
filename = "file_name-123.txt"
new_filename = increment_index_in_filename(filename)
expected_filename = "file_name-124.txt"
self.assertEqual(new_filename, expected_filename)
def test_duplicate_export_filename_is_renamed(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create an export object in the db
# TODO: only works if the time we generate the basename matches, to the second, the time the 2nd export is created
basename = "%s_%s" % (self.xform.id_string,
datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + ".csv"
export = Export.objects.create(xform=self.xform,
export_type=Export.CSV_EXPORT, filename=filename)
# 2nd export
export_2 = generate_export(Export.CSV_EXPORT, 'csv', self.user.username,
self.xform.id_string)
if export.created_on.timetuple() == export_2.created_on.timetuple():
new_filename = increment_index_in_filename(filename)
self.assertEqual(new_filename, export_2.filename)
else:
stdout.write("duplicate export filename test skipped because export times differ.")
def test_export_download_url(self):
self._publish_transportation_form()
self._submit_transport_instance()
export = generate_export(Export.CSV_EXPORT, 'csv', self.user.username,
self.xform.id_string)
csv_export_url = reverse(export_download, kwargs={
"username": self.user.username,
"id_string": self.xform.id_string,
"export_type": Export.CSV_EXPORT,
"filename": export.filename
})
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
# test xls
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
xls_export_url = reverse(export_download, kwargs={
"username": self.user.username,
"id_string": self.xform.id_string,
"export_type": Export.XLS_EXPORT,
"filename": export.filename
})
response = self.client.get(xls_export_url)
self.assertEqual(response.status_code, 200)
def test_404_on_export_io_error(self):
"""
Test that we return a 404 when the response_with_mimetype_and_name encounters an IOError
"""
self._publish_transportation_form()
self._submit_transport_instance()
export = generate_export(Export.CSV_EXPORT, 'csv', self.user.username,
self.xform.id_string)
export_url = reverse(export_download, kwargs={
"username": self.user.username,
"id_string": self.xform.id_string,
"export_type": Export.CSV_EXPORT,
"filename": export.filename
})
# delete the export
export.delete()
# access the export
response = self.client.get(export_url)
self.assertEqual(response.status_code, 404)
def test_deleted_submission_not_in_export(self):
self._publish_transportation_form()
initial_count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
self._submit_transport_instance(0)
self._submit_transport_instance(1)
count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
self.assertEqual(count, initial_count+2)
# get id of second submission
instance_id = Instance.objects.filter(
xform=self.xform).order_by('id').reverse()[0].id
delete_url = reverse(
delete_data, kwargs={"username": self.user.username,
"id_string": self.xform.id_string})
params = {'id': instance_id}
self.client.post(delete_url, params)
count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
self.assertEqual(count, initial_count + 1)
# create the export
csv_export_url = reverse(
'csv_export', kwargs={"username": self.user.username,
"id_string":self.xform.id_string})
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
f = StringIO.StringIO(self._get_response_content(response))
csv_reader = csv.reader(f)
num_rows = len([row for row in csv_reader])
f.close()
# number of rows == initial_count + 2, i.e. the header plus the one remaining data row
self.assertEqual(num_rows, initial_count + 2)
def test_edited_submissions_in_exports(self):
self._publish_transportation_form()
initial_count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
instance_name = 'transport_2011-07-25_19-05-36'
path = os.path.join(
'main', 'tests', 'fixtures', 'transportation', 'instances_w_uuid',
instance_name, instance_name + '.xml')
self._make_submission(path)
count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
self.assertEqual(count, initial_count+1)
instance = Instance.objects.filter(
xform=self.xform).order_by('id').reverse()[0]
# make edited submission - simulating what enketo would return
instance_name = 'transport_2011-07-25_19-05-36-edited'
path = os.path.join(
'main', 'tests', 'fixtures', 'transportation', 'instances_w_uuid',
instance_name, instance_name + '.xml')
self._make_submission(path)
count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
self.assertEqual(count, initial_count+1)
# create the export
csv_export_url = reverse(
'csv_export', kwargs={"username": self.user.username,
"id_string":self.xform.id_string})
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
f = StringIO.StringIO(self._get_response_content(response))
csv_reader = csv.DictReader(f)
data = [row for row in csv_reader]
f.close()
num_rows = len(data)
# number of rows == initial_count + 1
self.assertEqual(num_rows, initial_count + 1)
key = 'transport/loop_over_transport_types_frequency/ambulance/frequency_to_referral_facility'
self.assertEqual(data[initial_count][key], "monthly")
def test_export_ids_dont_have_comma_separation(self):
"""
It seems using {{ }} to output numbers greater than 1000 formats the
number with a thousand separator
"""
self._publish_transportation_form()
self._submit_transport_instance()
# create an in-complete export
export = Export.objects.create(id=1234, xform=self.xform,
export_type=Export.XLS_EXPORT)
self.assertEqual(export.pk, 1234)
export_list_url = reverse(
export_list, kwargs={
"username": self.user.username,
"id_string": self.xform.id_string,
"export_type": Export.XLS_EXPORT
})
response = self.client.get(export_list_url)
self.assertContains(response, '#delete-1234"')
self.assertNotContains(response, '#delete-1,234"')
def test_export_progress_updates(self):
"""
Test that after generate_export is called, we change our state to
pending and, after it is complete, we change it to complete; if we fail
between the two updates, we have failed
"""
self._publish_transportation_form()
# generate an export that fails because of the NoRecordsFound exception
export = Export.objects.create(xform=self.xform,
export_type=Export.XLS_EXPORT)
# check that progress url says pending
progress_url = reverse(export_progress, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
params = {'export_ids': [export.id]}
response = self.client.get(progress_url, params)
status = json.loads(response.content)[0]
self.assertEqual(status["complete"], False)
self.assertEqual(status["filename"], None)
export.internal_status = Export.FAILED
export.save()
# check that progress url says failed
progress_url = reverse(export_progress, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
params = {'export_ids': [export.id]}
response = self.client.get(progress_url, params)
status = json.loads(response.content)[0]
self.assertEqual(status["complete"], True)
self.assertEqual(status["filename"], None)
# make a submission and create a valid export
self._submit_transport_instance()
create_xls_export(
self.user.username,
self.xform.id_string, export.id)
params = {'export_ids': [export.id]}
response = self.client.get(progress_url, params)
status = json.loads(response.content)[0]
self.assertEqual(status["complete"], True)
self.assertIsNotNone(status["filename"])
def test_direct_export_returns_newest_export_if_not_updated_since(self):
self._publish_transportation_form()
self._submit_transport_instance()
self.assertEqual(self.response.status_code, 201)
self._submit_transport_instance_w_uuid("transport_2011-07-25_19-05-36")
self.assertEqual(self.response.status_code, 201)
initial_num_csv_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
initial_num_xls_exports = Export.objects.filter(
xform=self.xform, export_type=Export.XLS_EXPORT).count()
# request a direct csv export
csv_export_url = reverse('csv_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
xls_export_url = reverse('xls_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
# we should have initial_num_exports + 1 exports
num_csv_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
self.assertEqual(num_csv_exports, initial_num_csv_exports + 1)
# request another export without changing the data
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
# we should still only have a single export object
num_csv_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
self.assertEqual(num_csv_exports, initial_num_csv_exports + 1)
# this should not affect a direct XLS export and XLS should still re-generate
response = self.client.get(xls_export_url)
self.assertEqual(response.status_code, 200)
num_xls_exports = Export.objects.filter(
xform=self.xform, export_type=Export.XLS_EXPORT).count()
self.assertEqual(num_xls_exports, initial_num_xls_exports + 1)
# make sure xls doesn't re-generate if data hasn't changed
response = self.client.get(xls_export_url)
self.assertEqual(response.status_code, 200)
num_xls_exports = Export.objects.filter(
xform=self.xform, export_type=Export.XLS_EXPORT).count()
self.assertEqual(num_xls_exports, initial_num_xls_exports + 1)
# check that data edits cause a re-generation
self._submit_transport_instance_w_uuid(
"transport_2011-07-25_19-05-36-edited")
self.assertEqual(self.response.status_code, 201)
self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
# we should have an extra export now that the data has been updated
num_csv_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
self.assertEqual(num_csv_exports, initial_num_csv_exports + 2)
# and when we delete
delete_url = reverse(delete_data, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
instance = Instance.objects.latest('date_modified')
response = self.client.post(delete_url, {'id': instance.id})
self.assertEqual(response.status_code, 200)
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
# we should have an extra export now that the data has been updated by the delete
num_csv_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
self.assertEqual(num_csv_exports, initial_num_csv_exports + 3)
def test_exports_outdated_doesnt_consider_failed_exports(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create a bad export
export = Export.objects.create(
xform=self.xform, export_type=Export.XLS_EXPORT,
internal_status=Export.FAILED)
self.assertTrue(
Export.exports_outdated(self.xform, export.export_type))
def test_exports_outdated_considers_pending_exports(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create a pending export
export = Export.objects.create(
xform=self.xform, export_type=Export.XLS_EXPORT,
internal_status=Export.PENDING)
self.assertFalse(
Export.exports_outdated(self.xform, export.export_type))
def _get_csv_data(self, filepath):
storage = get_storage_class()()
csv_file = storage.open(filepath)
reader = csv.DictReader(csv_file)
data = reader.next()
csv_file.close()
return data
def _get_xls_data(self, filepath):
storage = get_storage_class()()
with storage.open(filepath) as f:
workbook = open_workbook(file_contents=f.read())
transportation_sheet = workbook.sheet_by_name("transportation")
self.assertTrue(transportation_sheet.nrows > 1)
headers = transportation_sheet.row_values(0)
column1 = transportation_sheet.row_values(1)
return dict(zip(headers, column1))
def test_column_header_delimiter_export_option(self):
self._publish_transportation_form()
# survey 1 has ambulance and bicycle as values for
# transport/available_transportation_types_to_referral_facility
self._submit_transport_instance(survey_at=1)
create_csv_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'csv'
})
default_params = {}
custom_params = {
'options[group_delimiter]': '.',
}
# test csv with default group delimiter
response = self.client.post(create_csv_export_url, default_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='csv').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_csv_data(export.filepath)
self.assertTrue(
data.has_key(
'transport/available_transportation_types_to_referral_facility/ambulance'))
self.assertEqual(
data['transport/available_transportation_types_to_referral_facility/ambulance'], 'True')
# test csv with dot delimiter
response = self.client.post(create_csv_export_url, custom_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='csv').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_csv_data(export.filepath)
self.assertTrue(
data.has_key(
'transport.available_transportation_types_to_referral_facility.ambulance'))
self.assertEqual(
data['transport.available_transportation_types_to_referral_facility.ambulance'], 'True')
# test xls with default group delimiter
create_csv_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
response = self.client.post(create_csv_export_url, default_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='xls').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_xls_data(export.filepath)
self.assertTrue(
data.has_key("transport/available_transportation_types_to_referral_facility/ambulance"))
# xlrd reader seems to convert bools into integers i.e. 0 or 1
self.assertEqual(
data["transport/available_transportation_types_to_referral_facility/ambulance"], 1)
# test xls with dot delimiter
response = self.client.post(create_csv_export_url, custom_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='xls').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_xls_data(export.filepath)
self.assertTrue(
data.has_key("transport.available_transportation_types_to_referral_facility.ambulance"))
# xlrd reader seems to convert bools into integers i.e. 0 or 1
self.assertEqual(
data["transport.available_transportation_types_to_referral_facility.ambulance"], 1)
def test_split_select_multiple_export_option(self):
self._publish_transportation_form()
self._submit_transport_instance(survey_at=1)
create_csv_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'csv'
})
default_params = {}
custom_params = {
'options[dont_split_select_multiples]': 'yes'
}
# test csv with default split select multiples
response = self.client.post(create_csv_export_url, default_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='csv').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_csv_data(export.filepath)
# we should have transport/available_transportation_types_to_referral_facility/ambulance as a separate column
self.assertTrue(
data.has_key(
'transport/available_transportation_types_to_referral_facility/ambulance'))
# test csv without default split select multiples
response = self.client.post(create_csv_export_url, custom_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='csv').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_csv_data(export.filepath)
# transport/available_transportation_types_to_referral_facility/ambulance should not be in its own column
self.assertFalse(
data.has_key(
'transport/available_transportation_types_to_referral_facility/ambulance'))
# transport/available_transportation_types_to_referral_facility should be a column
self.assertTrue(
data.has_key(
'transport/available_transportation_types_to_referral_facility'))
# check that ambulance is one of the values within the transport/available_transportation_types_to_referral_facility column
self.assertTrue("ambulance" in data['transport/available_transportation_types_to_referral_facility'].split(" "))
create_xls_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
# test xls with default split select multiples
response = self.client.post(create_xls_export_url, default_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='xls').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_xls_data(export.filepath)
# we should have transport/available_transportation_types_to_referral_facility/ambulance as a separate column
self.assertTrue(
data.has_key(
'transport/available_transportation_types_to_referral_facility/ambulance'))
# test xls without default split select multiples
response = self.client.post(create_xls_export_url, custom_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='xls').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_xls_data(export.filepath)
# transport/available_transportation_types_to_referral_facility/ambulance should NOT be in its own column
self.assertFalse(
data.has_key(
'transport/available_transportation_types_to_referral_facility/ambulance'))
# transport/available_transportation_types_to_referral_facility should be a column
self.assertTrue(
data.has_key(
'transport/available_transportation_types_to_referral_facility'))
        # check that ambulance is one of the values within the transport/available_transportation_types_to_referral_facility column
self.assertTrue("ambulance" in data['transport/available_transportation_types_to_referral_facility'].split(" "))
def test_dict_to_joined_export_works(self):
data =\
{
'name': 'Abe',
'age': '35',
'_geolocation': [None, None],
'attachments': ['abcd.jpg', 'efgh.jpg'],
'children':
[
{
'children/name': 'Mike',
'children/age': '5',
'children/cartoons':
[
{
'children/cartoons/name': 'Tom & Jerry',
'children/cartoons/why': 'Tom is silly',
},
{
'children/cartoons/name': 'Flinstones',
'children/cartoons/why': u"I like bamb bam\u0107",
}
]
},
{
'children/name': 'John',
'children/age': '2',
'children/cartoons':[]
},
{
'children/name': 'Imora',
'children/age': '3',
'children/cartoons':
[
{
'children/cartoons/name': 'Shrek',
'children/cartoons/why': 'He\'s so funny'
},
{
'children/cartoons/name': 'Dexter\'s Lab',
'children/cartoons/why': 'He thinks hes smart',
'children/cartoons/characters':
[
{
'children/cartoons/characters/name': 'Dee Dee',
'children/cartoons/characters/good_or_evil': 'good'
},
{
'children/cartoons/characters/name': 'Dexter',
'children/cartoons/characters/good_or_evil': 'evil'
},
]
}
]
}
]
}
expected_output =\
{
'survey': {
'name': 'Abe',
'age': '35'
},
'children':
[
{
'children/name': 'Mike',
'children/age': '5',
'_index': 1,
'_parent_table_name': 'survey',
'_parent_index': 1
},
{
'children/name': 'John',
'children/age': '2',
'_index': 2,
'_parent_table_name': 'survey',
'_parent_index': 1
},
{
'children/name': 'Imora',
'children/age': '3',
'_index': 3,
'_parent_table_name': 'survey',
'_parent_index': 1
},
],
'children/cartoons':
[
{
'children/cartoons/name': 'Tom & Jerry',
'children/cartoons/why': 'Tom is silly',
'_index': 1,
'_parent_table_name': 'children',
'_parent_index': 1
},
{
'children/cartoons/name': 'Flinstones',
'children/cartoons/why': u"I like bamb bam\u0107",
'_index': 2,
'_parent_table_name': 'children',
'_parent_index': 1
},
{
'children/cartoons/name': 'Shrek',
'children/cartoons/why': 'He\'s so funny',
'_index': 3,
'_parent_table_name': 'children',
'_parent_index': 3
},
{
'children/cartoons/name': 'Dexter\'s Lab',
'children/cartoons/why': 'He thinks hes smart',
'_index': 4,
'_parent_table_name': 'children',
'_parent_index': 3
}
],
'children/cartoons/characters':
[
{
'children/cartoons/characters/name': 'Dee Dee',
'children/cartoons/characters/good_or_evil': 'good',
'_index': 1,
'_parent_table_name': 'children/cartoons',
'_parent_index': 4
},
{
'children/cartoons/characters/name': 'Dexter',
'children/cartoons/characters/good_or_evil': 'evil',
'_index': 2,
'_parent_table_name': 'children/cartoons',
'_parent_index': 4
}
]
}
survey_name = 'survey'
indices = {survey_name: 0}
output = dict_to_joined_export(data, 1, indices, survey_name)
self.assertEqual(output[survey_name], expected_output[survey_name])
# 1st level
self.assertEqual(len(output['children']), 3)
        for index, name in enumerate(['Mike', 'John', 'Imora']):
self.assertEqual(
filter(
lambda x: x['children/name'] == name,
output['children'])[0],
expected_output['children'][index])
# 2nd level
self.assertEqual(len(output['children/cartoons']), 4)
        for index, name in enumerate(
                ['Tom & Jerry', 'Flinstones', 'Shrek', 'Dexter\'s Lab']):
self.assertEqual(
filter(
lambda x: x['children/cartoons/name'] == name,
output['children/cartoons'])[0],
expected_output['children/cartoons'][index])
# 3rd level
self.assertEqual(len(output['children/cartoons/characters']), 2)
        for index, name in enumerate(['Dee Dee', 'Dexter']):
self.assertEqual(
filter(
lambda x: x['children/cartoons/characters/name'] == name,
output['children/cartoons/characters'])[0],
expected_output['children/cartoons/characters'][index])
def test_generate_csv_zip_export(self):
# publish xls form
self._publish_transportation_form_and_submit_instance()
# create export db object
export = generate_export(
Export.CSV_ZIP_EXPORT, "zip", self.user.username,
self.xform.id_string, group_delimiter='/',
split_select_multiples=True)
storage = get_storage_class()()
self.assertTrue(storage.exists(export.filepath))
path, ext = os.path.splitext(export.filename)
self.assertEqual(ext, '.zip')
class TestExportBuilder(MainTestCase):
data = [
{
'name': 'Abe',
'age': 35,
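            # Note: 'Lg==' is the base64 encoding of '.', i.e. the mongo-encoded
            # form of a dot inside a field name, as used throughout these fixtures.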
'tel/telLg==office': '020123456',
'children':
[
{
'children/name': 'Mike',
'children/age': 5,
'children/fav_colors': 'red blue',
'children/iceLg==creams': 'vanilla chocolate',
'children/cartoons':
[
{
'children/cartoons/name': 'Tom & Jerry',
'children/cartoons/why': 'Tom is silly',
},
{
'children/cartoons/name': 'Flinstones',
'children/cartoons/why': u"I like bam bam\u0107"
# throw in a unicode character
}
]
},
{
'children/name': 'John',
'children/age': 2,
'children/cartoons': []
},
{
'children/name': 'Imora',
'children/age': 3,
'children/cartoons':
[
{
'children/cartoons/name': 'Shrek',
'children/cartoons/why': 'He\'s so funny'
},
{
'children/cartoons/name': 'Dexter\'s Lab',
'children/cartoons/why': 'He thinks hes smart',
'children/cartoons/characters':
[
{
'children/cartoons/characters/name': 'Dee Dee',
'children/cartoons/characters/good_or_evil': 'good'
},
{
'children/cartoons/characters/name': 'Dexter',
'children/cartoons/characters/good_or_evil': 'evil'
},
]
}
]
}
]
},
{
# blank data just to be sure
'children': []
}
]
long_survey_data = [
{
'name': 'Abe',
'age': 35,
'childrens_survey_with_a_very_lo':
[
{
'childrens_survey_with_a_very_lo/name': 'Mike',
'childrens_survey_with_a_very_lo/age': 5,
'childrens_survey_with_a_very_lo/fav_colors': 'red blue',
'childrens_survey_with_a_very_lo/cartoons':
[
{
'childrens_survey_with_a_very_lo/cartoons/name': 'Tom & Jerry',
'childrens_survey_with_a_very_lo/cartoons/why': 'Tom is silly',
},
{
'childrens_survey_with_a_very_lo/cartoons/name': 'Flinstones',
'childrens_survey_with_a_very_lo/cartoons/why': u"I like bam bam\u0107"
# throw in a unicode character
}
]
},
{
'childrens_survey_with_a_very_lo/name': 'John',
'childrens_survey_with_a_very_lo/age': 2,
'childrens_survey_with_a_very_lo/cartoons': []
},
{
'childrens_survey_with_a_very_lo/name': 'Imora',
'childrens_survey_with_a_very_lo/age': 3,
'childrens_survey_with_a_very_lo/cartoons':
[
{
'childrens_survey_with_a_very_lo/cartoons/name': 'Shrek',
'childrens_survey_with_a_very_lo/cartoons/why': 'He\'s so funny'
},
{
'childrens_survey_with_a_very_lo/cartoons/name': 'Dexter\'s Lab',
'childrens_survey_with_a_very_lo/cartoons/why': 'He thinks hes smart',
'childrens_survey_with_a_very_lo/cartoons/characters':
[
{
'childrens_survey_with_a_very_lo/cartoons/characters/name': 'Dee Dee',
'children/cartoons/characters/good_or_evil': 'good'
},
{
'childrens_survey_with_a_very_lo/cartoons/characters/name': 'Dexter',
'children/cartoons/characters/good_or_evil': 'evil'
},
]
}
]
}
]
}
]
data_utf8 = [
{
'name': 'Abe',
'age': 35,
'tel/telLg==office': '020123456',
'childrenLg==info':
[
{
'childrenLg==info/nameLg==first': 'Mike',
'childrenLg==info/age': 5,
'childrenLg==info/fav_colors': u'red\u2019s blue\u2019s',
'childrenLg==info/ice_creams': 'vanilla chocolate',
'childrenLg==info/cartoons':
[
{
'childrenLg==info/cartoons/name': 'Tom & Jerry',
'childrenLg==info/cartoons/why': 'Tom is silly',
},
{
'childrenLg==info/cartoons/name': 'Flinstones',
'childrenLg==info/cartoons/why': u"I like bam bam\u0107"
# throw in a unicode character
}
]
}
]
}
]
def _create_childrens_survey(self):
survey = create_survey_from_xls(
os.path.join(
os.path.abspath('./'), 'odk_logger', 'tests', 'fixtures',
'childrens_survey.xls'))
return survey
def test_build_sections_from_survey(self):
survey = self._create_childrens_survey()
export_builder = ExportBuilder()
export_builder.set_survey(survey)
# test that we generate the proper sections
expected_sections = [
survey.name, 'children', 'children/cartoons',
'children/cartoons/characters']
self.assertEqual(
expected_sections, [s['name'] for s in export_builder.sections])
# main section should have split geolocations
expected_element_names = [
'name', 'age', 'geo/geolocation', 'geo/_geolocation_longitude',
'geo/_geolocation_latitude', 'geo/_geolocation_altitude',
'geo/_geolocation_precision', 'tel/tel.office', 'tel/tel.mobile',
'meta/instanceID']
section = export_builder.section_by_name(survey.name)
element_names = [element['xpath'] for element in section['elements']]
# fav_colors should have its choices split
self.assertEqual(
sorted(expected_element_names), sorted(element_names))
expected_element_names = [
'children/name', 'children/age', 'children/fav_colors',
'children/fav_colors/red', 'children/fav_colors/blue',
'children/fav_colors/pink', 'children/ice.creams',
'children/ice.creams/vanilla', 'children/ice.creams/strawberry',
'children/ice.creams/chocolate']
section = export_builder.section_by_name('children')
element_names = [element['xpath'] for element in section['elements']]
self.assertEqual(
sorted(expected_element_names), sorted(element_names))
expected_element_names = [
'children/cartoons/name', 'children/cartoons/why']
section = export_builder.section_by_name('children/cartoons')
element_names = [element['xpath'] for element in section['elements']]
self.assertEqual(
sorted(expected_element_names), sorted(element_names))
expected_element_names = [
'children/cartoons/characters/name',
'children/cartoons/characters/good_or_evil']
section = export_builder.section_by_name('children/cartoons/characters')
element_names = [element['xpath'] for element in section['elements']]
self.assertEqual(
sorted(expected_element_names), sorted(element_names))
def test_zipped_csv_export_works(self):
survey = self._create_childrens_survey()
export_builder = ExportBuilder()
export_builder.set_survey(survey)
temp_zip_file = NamedTemporaryFile(suffix='.zip')
export_builder.to_zipped_csv(temp_zip_file.name, self.data)
temp_zip_file.seek(0)
temp_dir = tempfile.mkdtemp()
zip_file = zipfile.ZipFile(temp_zip_file.name, "r")
zip_file.extractall(temp_dir)
zip_file.close()
temp_zip_file.close()
# generate data to compare with
index = 1
indices = {}
survey_name = survey.name
outputs = []
for d in self.data:
outputs.append(
dict_to_joined_export(d, index, indices, survey_name))
index += 1
# check that each file exists
self.assertTrue(
os.path.exists(
os.path.join(temp_dir, "{0}.csv".format(survey.name))))
with open(
os.path.join(
temp_dir, "{0}.csv".format(survey.name))) as csv_file:
reader = csv.reader(csv_file)
rows = [r for r in reader]
# open comparison file
with open(
os.path.join(
os.path.abspath('./'), 'odk_logger', 'tests', 'fixtures',
'csvs', 'childrens_survey.csv')) as fixture_csv:
fixture_reader = csv.reader(fixture_csv)
expected_rows = [r for r in fixture_reader]
self.assertEqual(rows, expected_rows)
self.assertTrue(
os.path.exists(
os.path.join(temp_dir, "children.csv")))
with open(os.path.join(temp_dir, "children.csv")) as csv_file:
reader = csv.reader(csv_file)
rows = [r for r in reader]
# open comparison file
with open(
os.path.join(
os.path.abspath('./'), 'odk_logger', 'tests', 'fixtures',
'csvs', 'children.csv')) as fixture_csv:
fixture_reader = csv.reader(fixture_csv)
expected_rows = [r for r in fixture_reader]
self.assertEqual(rows, expected_rows)
self.assertTrue(
os.path.exists(
os.path.join(temp_dir, "children_cartoons.csv")))
with open(os.path.join(temp_dir, "children_cartoons.csv")) as csv_file:
reader = csv.reader(csv_file)
rows = [r for r in reader]
# open comparison file
with open(
os.path.join(
os.path.abspath('./'), 'odk_logger', 'tests', 'fixtures',
'csvs', 'children_cartoons.csv')) as fixture_csv:
fixture_reader = csv.reader(fixture_csv)
expected_rows = [r for r in fixture_reader]
self.assertEqual(rows, expected_rows)
self.assertTrue(
os.path.exists(
os.path.join(temp_dir, "children_cartoons_characters.csv")))
with open(os.path.join(
temp_dir, "children_cartoons_characters.csv")) as csv_file:
reader = csv.reader(csv_file)
rows = [r for r in reader]
# open comparison file
with open(
os.path.join(
os.path.abspath('./'), 'odk_logger', 'tests', 'fixtures',
'csvs', 'children_cartoons_characters.csv')) as fixture_csv:
fixture_reader = csv.reader(fixture_csv)
expected_rows = [r for r in fixture_reader]
self.assertEqual(rows, expected_rows)
shutil.rmtree(temp_dir)
def test_decode_mongo_encoded_section_names(self):
data = {
'main_section': [1, 2, 3, 4],
'sectionLg==1/info': [1, 2, 3, 4],
'sectionLg==2/info': [1, 2, 3, 4],
}
result = ExportBuilder.decode_mongo_encoded_section_names(data)
expected_result = {
'main_section': [1, 2, 3, 4],
'section.1/info': [1, 2, 3, 4],
'section.2/info': [1, 2, 3, 4],
}
self.assertEqual(result, expected_result)
def test_zipped_csv_export_works_with_unicode(self):
"""
        csv writer doesn't handle unicode, so we have to encode to ascii
"""
survey = create_survey_from_xls(
os.path.join(
os.path.abspath('./'), 'odk_logger', 'tests', 'fixtures',
'childrens_survey_unicode.xls'))
export_builder = ExportBuilder()
export_builder.set_survey(survey)
temp_zip_file = NamedTemporaryFile(suffix='.zip')
export_builder.to_zipped_csv(temp_zip_file.name, self.data_utf8)
temp_zip_file.seek(0)
temp_dir = tempfile.mkdtemp()
zip_file = zipfile.ZipFile(temp_zip_file.name, "r")
zip_file.extractall(temp_dir)
zip_file.close()
temp_zip_file.close()
# check that the children's file (which has the unicode header) exists
self.assertTrue(
os.path.exists(
os.path.join(temp_dir, "children.info.csv")))
# check file's contents
with open(os.path.join(temp_dir, "children.info.csv")) as csv_file:
reader = csv.reader(csv_file)
expected_headers = ['children.info/name.first',
'children.info/age',
'children.info/fav_colors',
u'children.info/fav_colors/red\u2019s',
u'children.info/fav_colors/blue\u2019s',
u'children.info/fav_colors/pink\u2019s',
'children.info/ice_creams',
'children.info/ice_creams/vanilla',
'children.info/ice_creams/strawberry',
'children.info/ice_creams/chocolate', '_id',
'_uuid', '_submission_time', '_index',
'_parent_table_name', '_parent_index']
rows = [row for row in reader]
actual_headers = [h.decode('utf-8') for h in rows[0]]
self.assertEqual(sorted(actual_headers), sorted(expected_headers))
data = dict(zip(rows[0], rows[1]))
self.assertEqual(
data[u'children.info/fav_colors/red\u2019s'.encode('utf-8')],
'True')
self.assertEqual(
data[u'children.info/fav_colors/blue\u2019s'.encode('utf-8')],
'True')
self.assertEqual(
data[u'children.info/fav_colors/pink\u2019s'.encode('utf-8')],
'False')
# check that red and blue are set to true
shutil.rmtree(temp_dir)
def test_xls_export_works_with_unicode(self):
survey = create_survey_from_xls(
os.path.join(
os.path.abspath('./'), 'odk_logger', 'tests', 'fixtures',
'childrens_survey_unicode.xls'))
export_builder = ExportBuilder()
export_builder.set_survey(survey)
temp_xls_file = NamedTemporaryFile(suffix='.xlsx')
export_builder.to_xls_export(temp_xls_file.name, self.data_utf8)
temp_xls_file.seek(0)
# check that values for red\u2019s and blue\u2019s are set to true
wb = load_workbook(temp_xls_file.name)
children_sheet = wb.get_sheet_by_name("children.info")
data = dict([(r[0].value, r[1].value) for r in children_sheet.columns])
self.assertTrue(data[u'children.info/fav_colors/red\u2019s'])
self.assertTrue(data[u'children.info/fav_colors/blue\u2019s'])
self.assertFalse(data[u'children.info/fav_colors/pink\u2019s'])
temp_xls_file.close()
def test_generation_of_multi_selects_works(self):
survey = self._create_childrens_survey()
export_builder = ExportBuilder()
export_builder.set_survey(survey)
expected_select_multiples =\
{
'children':
{
'children/fav_colors':
[
'children/fav_colors/red', 'children/fav_colors/blue',
'children/fav_colors/pink'
],
'children/ice.creams':
[
'children/ice.creams/vanilla',
'children/ice.creams/strawberry',
'children/ice.creams/chocolate'
]
}
}
select_multiples = export_builder.select_multiples
self.assertTrue('children' in select_multiples)
self.assertTrue('children/fav_colors' in select_multiples['children'])
self.assertTrue('children/ice.creams' in select_multiples['children'])
self.assertEqual(
sorted(select_multiples['children']['children/fav_colors']),
sorted(
expected_select_multiples['children']['children/fav_colors']))
self.assertEqual(
sorted(select_multiples['children']['children/ice.creams']),
sorted(
expected_select_multiples['children']['children/ice.creams']))
def test_split_select_multiples_works(self):
select_multiples =\
{
'children/fav_colors': [
'children/fav_colors/red', 'children/fav_colors/blue',
'children/fav_colors/pink']
}
row = \
{
'children/name': 'Mike',
'children/age': 5,
'children/fav_colors': 'red blue'
}
new_row = ExportBuilder.split_select_multiples(
row, select_multiples)
expected_row = \
{
'children/name': 'Mike',
'children/age': 5,
'children/fav_colors': 'red blue',
'children/fav_colors/red': True,
'children/fav_colors/blue': True,
'children/fav_colors/pink': False
}
self.assertEqual(new_row, expected_row)
def test_split_select_multiples_works_when_data_is_blank(self):
select_multiples =\
{
'children/fav_colors': [
'children/fav_colors/red', 'children/fav_colors/blue',
'children/fav_colors/pink']
}
row = \
{
'children/name': 'Mike',
'children/age': 5,
'children/fav_colors': ''
}
new_row = ExportBuilder.split_select_multiples(
row, select_multiples)
expected_row = \
{
'children/name': 'Mike',
'children/age': 5,
'children/fav_colors': '',
'children/fav_colors/red': False,
'children/fav_colors/blue': False,
'children/fav_colors/pink': False
}
self.assertEqual(new_row, expected_row)
def test_generation_of_gps_fields_works(self):
survey = self._create_childrens_survey()
export_builder = ExportBuilder()
export_builder.set_survey(survey)
expected_gps_fields =\
{
'childrens_survey':
{
'geo/geolocation':
[
'geo/_geolocation_latitude', 'geo/_geolocation_longitude',
'geo/_geolocation_altitude', 'geo/_geolocation_precision'
]
}
}
gps_fields = export_builder.gps_fields
self.assertTrue(gps_fields.has_key('childrens_survey'))
self.assertEqual(
sorted(gps_fields['childrens_survey']),
sorted(expected_gps_fields['childrens_survey']))
def test_split_gps_components_works(self):
gps_fields =\
{
'geo/geolocation':
[
'geo/_geolocation_latitude', 'geo/_geolocation_longitude',
'geo/_geolocation_altitude', 'geo/_geolocation_precision'
]
}
row = \
{
'geo/geolocation': '1.0 36.1 2000 20',
}
new_row = ExportBuilder.split_gps_components(
row, gps_fields)
expected_row = \
{
'geo/geolocation': '1.0 36.1 2000 20',
'geo/_geolocation_latitude': '1.0',
'geo/_geolocation_longitude': '36.1',
'geo/_geolocation_altitude': '2000',
'geo/_geolocation_precision': '20'
}
self.assertEqual(new_row, expected_row)
def test_split_gps_components_works_when_gps_data_is_blank(self):
gps_fields =\
{
'geo/geolocation':
[
'geo/_geolocation_latitude', 'geo/_geolocation_longitude',
'geo/_geolocation_altitude', 'geo/_geolocation_precision'
]
}
row = \
{
'geo/geolocation': '',
}
new_row = ExportBuilder.split_gps_components(
row, gps_fields)
expected_row = \
{
'geo/geolocation': '',
}
self.assertEqual(new_row, expected_row)
def test_generation_of_mongo_encoded_fields_works(self):
survey = self._create_childrens_survey()
export_builder = ExportBuilder()
export_builder.set_survey(survey)
expected_encoded_fields =\
{
'childrens_survey':
{
'tel/tel.office': 'tel/{0}'.format(
_encode_for_mongo('tel.office')),
'tel/tel.mobile': 'tel/{0}'.format(
_encode_for_mongo('tel.mobile')),
}
}
encoded_fields = export_builder.encoded_fields
self.assertTrue('childrens_survey' in encoded_fields)
self.assertEqual(
encoded_fields['childrens_survey'],
expected_encoded_fields['childrens_survey'])
def test_decode_fields_names_encoded_for_mongo(self):
encoded_fields = \
{
'tel/tel.office': 'tel/{0}'.format(
_encode_for_mongo('tel.office'))
}
row = \
{
'name': 'Abe',
'age': 35,
'tel/{0}'.format(_encode_for_mongo('tel.office')): '123-456-789'
}
new_row = ExportBuilder.decode_mongo_encoded_fields(row, encoded_fields)
expected_row = \
{
'name': 'Abe',
'age': 35,
'tel/tel.office': '123-456-789'
}
self.assertEqual(new_row, expected_row)
def test_generate_field_title(self):
field_name = ExportBuilder.format_field_title("child/age", ".")
expected_field_name = "child.age"
self.assertEqual(field_name, expected_field_name)
def test_delimiter_replacement_works_existing_fields(self):
survey = self._create_childrens_survey()
export_builder = ExportBuilder()
export_builder.GROUP_DELIMITER = "."
export_builder.set_survey(survey)
expected_sections =\
[
{
'name': 'children',
'elements': [
{
'title': 'children.name',
'xpath': 'children/name'
}
]
}
]
children_section = export_builder.section_by_name('children')
self.assertEqual(
children_section['elements'][0]['title'],
expected_sections[0]['elements'][0]['title'])
def test_delimiter_replacement_works_generated_multi_select_fields(self):
survey = self._create_childrens_survey()
export_builder = ExportBuilder()
export_builder.GROUP_DELIMITER = "."
export_builder.set_survey(survey)
expected_section =\
{
'name': 'children',
'elements': [
{
'title': 'children.fav_colors.red',
'xpath': 'children/fav_colors/red'
}
]
}
childrens_section = export_builder.section_by_name('children')
match = filter(lambda x: expected_section['elements'][0]['xpath']
== x['xpath'], childrens_section['elements'])[0]
self.assertEqual(
expected_section['elements'][0]['title'], match['title'])
def test_delimiter_replacement_works_for_generated_gps_fields(self):
survey = self._create_childrens_survey()
export_builder = ExportBuilder()
export_builder.GROUP_DELIMITER = "."
export_builder.set_survey(survey)
expected_section = \
{
'name': 'childrens_survey',
'elements': [
{
'title': 'geo._geolocation_latitude',
'xpath': 'geo/_geolocation_latitude'
}
]
}
main_section = export_builder.section_by_name('childrens_survey')
match = filter(
lambda x: (expected_section['elements'][0]['xpath']
== x['xpath']), main_section['elements'])[0]
self.assertEqual(
expected_section['elements'][0]['title'], match['title'])
def test_to_xls_export_works(self):
survey = self._create_childrens_survey()
export_builder = ExportBuilder()
export_builder.set_survey(survey)
xls_file = NamedTemporaryFile(suffix='.xls')
filename = xls_file.name
export_builder.to_xls_export(filename, self.data)
xls_file.seek(0)
wb = load_workbook(filename)
# check that we have childrens_survey, children, children_cartoons
# and children_cartoons_characters sheets
expected_sheet_names = ['childrens_survey', 'children',
'children_cartoons',
'children_cartoons_characters']
self.assertEqual(wb.get_sheet_names(), expected_sheet_names)
# check header columns
main_sheet = wb.get_sheet_by_name('childrens_survey')
expected_column_headers = [
u'name', u'age', u'geo/geolocation', u'geo/_geolocation_latitude',
u'geo/_geolocation_longitude', u'geo/_geolocation_altitude',
u'geo/_geolocation_precision', u'tel/tel.office',
u'tel/tel.mobile', u'_id', u'meta/instanceID', u'_uuid',
u'_submission_time', u'_index', u'_parent_index',
u'_parent_table_name']
column_headers = [c[0].value for c in main_sheet.columns]
self.assertEqual(sorted(column_headers),
sorted(expected_column_headers))
childrens_sheet = wb.get_sheet_by_name('children')
expected_column_headers = [
u'children/name', u'children/age', u'children/fav_colors',
u'children/fav_colors/red', u'children/fav_colors/blue',
u'children/fav_colors/pink', u'children/ice.creams',
u'children/ice.creams/vanilla', u'children/ice.creams/strawberry',
u'children/ice.creams/chocolate', u'_id', u'_uuid',
u'_submission_time', u'_index', u'_parent_index',
u'_parent_table_name']
column_headers = [c[0].value for c in childrens_sheet.columns]
self.assertEqual(sorted(column_headers),
sorted(expected_column_headers))
cartoons_sheet = wb.get_sheet_by_name('children_cartoons')
expected_column_headers = [
u'children/cartoons/name', u'children/cartoons/why', u'_id',
u'_uuid', u'_submission_time', u'_index', u'_parent_index',
u'_parent_table_name']
column_headers = [c[0].value for c in cartoons_sheet.columns]
self.assertEqual(sorted(column_headers),
sorted(expected_column_headers))
characters_sheet = wb.get_sheet_by_name('children_cartoons_characters')
expected_column_headers = [
u'children/cartoons/characters/name',
u'children/cartoons/characters/good_or_evil', u'_id', u'_uuid',
u'_submission_time', u'_index', u'_parent_index',
u'_parent_table_name']
column_headers = [c[0].value for c in characters_sheet.columns]
self.assertEqual(sorted(column_headers),
sorted(expected_column_headers))
xls_file.close()
def test_to_xls_export_respects_custom_field_delimiter(self):
survey = self._create_childrens_survey()
export_builder = ExportBuilder()
export_builder.GROUP_DELIMITER = ExportBuilder.GROUP_DELIMITER_DOT
export_builder.set_survey(survey)
xls_file = NamedTemporaryFile(suffix='.xls')
filename = xls_file.name
export_builder.to_xls_export(filename, self.data)
xls_file.seek(0)
wb = load_workbook(filename)
# check header columns
main_sheet = wb.get_sheet_by_name('childrens_survey')
expected_column_headers = [
u'name', u'age', u'geo.geolocation', u'geo._geolocation_latitude',
u'geo._geolocation_longitude', u'geo._geolocation_altitude',
u'geo._geolocation_precision', u'tel.tel.office',
u'tel.tel.mobile', u'_id', u'meta.instanceID', u'_uuid',
u'_submission_time', u'_index', u'_parent_index',
u'_parent_table_name']
column_headers = [c[0].value for c in main_sheet.columns]
self.assertEqual(sorted(column_headers),
sorted(expected_column_headers))
xls_file.close()
def test_get_valid_sheet_name_catches_duplicates(self):
work_sheets = {'childrens_survey': "Worksheet"}
desired_sheet_name = "childrens_survey"
expected_sheet_name = "childrens_survey1"
generated_sheet_name = ExportBuilder.get_valid_sheet_name(
desired_sheet_name, work_sheets)
self.assertEqual(generated_sheet_name, expected_sheet_name)
def test_get_valid_sheet_name_catches_long_names(self):
desired_sheet_name = "childrens_survey_with_a_very_long_name"
expected_sheet_name = "childrens_survey_with_a_very_lo"
generated_sheet_name = ExportBuilder.get_valid_sheet_name(
desired_sheet_name, [])
self.assertEqual(generated_sheet_name, expected_sheet_name)
def test_get_valid_sheet_name_catches_long_duplicate_names(self):
work_sheet_titles = ['childrens_survey_with_a_very_lo']
desired_sheet_name = "childrens_survey_with_a_very_long_name"
expected_sheet_name = "childrens_survey_with_a_very_l1"
generated_sheet_name = ExportBuilder.get_valid_sheet_name(
desired_sheet_name, work_sheet_titles)
self.assertEqual(generated_sheet_name, expected_sheet_name)
def test_to_xls_export_generates_valid_sheet_names(self):
survey = create_survey_from_xls(
os.path.join(
os.path.abspath('./'), 'odk_logger', 'tests', 'fixtures',
'childrens_survey_with_a_very_long_name.xls'))
export_builder = ExportBuilder()
export_builder.set_survey(survey)
xls_file = NamedTemporaryFile(suffix='.xls')
filename = xls_file.name
export_builder.to_xls_export(filename, self.data)
xls_file.seek(0)
wb = load_workbook(filename)
# check that we have childrens_survey, children, children_cartoons
# and children_cartoons_characters sheets
expected_sheet_names = ['childrens_survey_with_a_very_lo',
'childrens_survey_with_a_very_l1',
'childrens_survey_with_a_very_l2',
'childrens_survey_with_a_very_l3']
self.assertEqual(wb.get_sheet_names(), expected_sheet_names)
xls_file.close()
def test_child_record_parent_table_is_updated_when_sheet_is_renamed(self):
survey = create_survey_from_xls(
os.path.join(
os.path.abspath('./'), 'odk_logger', 'tests', 'fixtures',
'childrens_survey_with_a_very_long_name.xls'))
export_builder = ExportBuilder()
export_builder.set_survey(survey)
xls_file = NamedTemporaryFile(suffix='.xlsx')
filename = xls_file.name
export_builder.to_xls_export(filename, self.long_survey_data)
xls_file.seek(0)
wb = load_workbook(filename)
# get the children's sheet
ws1 = wb.get_sheet_by_name('childrens_survey_with_a_very_l1')
# parent_table is in cell K2
parent_table_name = ws1.cell('K2').value
expected_parent_table_name = 'childrens_survey_with_a_very_lo'
self.assertEqual(parent_table_name, expected_parent_table_name)
# get cartoons sheet
ws2 = wb.get_sheet_by_name('childrens_survey_with_a_very_l2')
parent_table_name = ws2.cell('G2').value
expected_parent_table_name = 'childrens_survey_with_a_very_l1'
self.assertEqual(parent_table_name, expected_parent_table_name)
xls_file.close()
def test_type_conversion(self):
submission_1 = {
"_id": 579827,
"geolocation": "-1.2625482 36.7924794 0.0 21.0",
"_bamboo_dataset_id": "",
"meta/instanceID": "uuid:2a8129f5-3091-44e1-a579-bed2b07a12cf",
"name": "Smith",
"formhub/uuid": "633ec390e024411ba5ce634db7807e62",
"_submission_time": "2013-07-03T08:25:30",
"age": "107",
"_uuid": "2a8129f5-3091-44e1-a579-bed2b07a12cf",
"when": "2013-07-03",
"_deleted_at": None,
"amount": "250.0",
"_geolocation": [
"-1.2625482",
"36.7924794"
],
"_xform_id_string": "test_data_types",
"_userform_id": "larryweya_test_data_types",
"_status": "submitted_via_web",
"precisely": "2013-07-03T15:24:00.000+03",
"really": "15:24:00.000+03"
}
submission_2 = {
"_id": 579828,
"_submission_time": "2013-07-03T08:26:10",
"_uuid": "5b4752eb-e13c-483e-87cb-e67ca6bb61e5",
"_bamboo_dataset_id": "",
"_deleted_at": None,
"_xform_id_string": "test_data_types",
"_userform_id": "larryweya_test_data_types",
"_status": "submitted_via_web",
"meta/instanceID": "uuid:5b4752eb-e13c-483e-87cb-e67ca6bb61e5",
"formhub/uuid": "633ec390e024411ba5ce634db7807e62",
"amount": "",
}
survey = create_survey_from_xls(
os.path.join(
os.path.abspath('./'), 'odk_viewer', 'tests', 'fixtures',
'test_data_types/test_data_types.xls'))
export_builder = ExportBuilder()
export_builder.set_survey(survey)
# format submission 1 for export
survey_name = survey.name
indices = {survey_name: 0}
data = dict_to_joined_export(submission_1, 1, indices, survey_name)
new_row = export_builder.pre_process_row(data[survey_name],
export_builder.sections[0])
self.assertIsInstance(new_row['age'], int)
self.assertIsInstance(new_row['when'], datetime.date)
#self.assertIsInstance(new_row['precisely'], datetime.datetime)
self.assertIsInstance(new_row['amount'], float)
#self.assertIsInstance(new_row['_submission_time'], datetime.datetime)
#self.assertIsInstance(new_row['really'], datetime.time)
# check missing values dont break and empty values return blank strings
indices = {survey_name: 0}
data = dict_to_joined_export(submission_2, 1, indices, survey_name)
new_row = export_builder.pre_process_row(data[survey_name],
export_builder.sections[0])
self.assertIsInstance(new_row['amount'], basestring)
self.assertEqual(new_row['amount'], '')
def test_convert_types(self):
val = '1'
expected_val = 1
converted_val = ExportBuilder.convert_type(val, 'int')
self.assertIsInstance(converted_val, int)
self.assertEqual(converted_val, expected_val)
val = '1.2'
expected_val = 1.2
converted_val = ExportBuilder.convert_type(val, 'decimal')
self.assertIsInstance(converted_val, float)
self.assertEqual(converted_val, expected_val)
val = '2012-06-23'
expected_val = datetime.date(2012, 6, 23)
converted_val = ExportBuilder.convert_type(val, 'date')
self.assertIsInstance(converted_val, datetime.date)
self.assertEqual(converted_val, expected_val)
| bsd-2-clause |
vamsirajendra/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkagg.py | 70 | 4184 | """
Render to gtk from agg
"""
from __future__ import division
import os
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\
show, draw_if_interactive,\
error_msg_gtk, NavigationToolbar, PIXELS_PER_INCH, backend_version, \
NavigationToolbar2GTK
from matplotlib.backends._gtkagg import agg_to_gtk_drawable
DEBUG = False
class NavigationToolbar2GTKAgg(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKAgg(fig)
class FigureManagerGTKAgg(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKAgg (canvas, self.window)
else:
toolbar = None
return toolbar
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_gtkagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKAgg(thisFig)
    if DEBUG: print 'backend_gtkagg.new_figure_manager done'
    return FigureManagerGTKAgg(canvas, num)
class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(FigureCanvasAgg.filetypes)
def configure_event(self, widget, event=None):
if DEBUG: print 'FigureCanvasGTKAgg.configure_event'
if widget.window is None:
return
try:
del self.renderer
except AttributeError:
pass
w,h = widget.window.get_size()
if w==1 or h==1: return # empty fig
# compute desired figure size in inches
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch, hinch)
self._need_redraw = True
self.resize_event()
if DEBUG: print 'FigureCanvasGTKAgg.configure_event end'
return True
def _render_figure(self, pixmap, width, height):
if DEBUG: print 'FigureCanvasGTKAgg.render_figure'
FigureCanvasAgg.draw(self)
if DEBUG: print 'FigureCanvasGTKAgg.render_figure pixmap', pixmap
#agg_to_gtk_drawable(pixmap, self.renderer._renderer, None)
buf = self.buffer_rgba(0,0)
ren = self.get_renderer()
w = int(ren.width)
h = int(ren.height)
pixbuf = gtk.gdk.pixbuf_new_from_data(
buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4)
pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h,
gtk.gdk.RGB_DITHER_NONE, 0, 0)
if DEBUG: print 'FigureCanvasGTKAgg.render_figure done'
def blit(self, bbox=None):
if DEBUG: print 'FigureCanvasGTKAgg.blit'
if DEBUG: print 'FigureCanvasGTKAgg.blit', self._pixmap
agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox)
x, y, w, h = self.allocation
self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap,
0, 0, 0, 0, w, h)
if DEBUG: print 'FigureCanvasGTKAgg.done'
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
"""\
Traceback (most recent call last):
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event
self._render_figure(self._pixmap, w, h)
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure
pixbuf = gtk.gdk.pixbuf_new_from_data(
ValueError: data length (3156672) is less then required by the other parameters (3160608)
"""
| agpl-3.0 |
fengzhyuan/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
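# A hedged sketch of that alternative, assuming a SciPy version that ships
# scipy.sparse.rand; kept commented out so the deterministic fixture built
# below remains exactly what these tests exercise:
#   X = sp.rand(60, 55, density=0.2, format='csr', random_state=42)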
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/frontend/qt/console/qtconsoleapp.py | 3 | 12925 | """ A minimal application using the Qt console-style IPython frontend.
This is not a complete console app, as subprocess will not be able to receive
input, there is no real readline support, among other limitations.
Authors:
* Evan Patterson
* Min RK
* Erik Tollerud
* Fernando Perez
* Bussonnier Matthias
* Thomas Kluyver
* Paul Ivanov
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib imports
import json
import os
import signal
import sys
import uuid
# If run on Windows, install an exception hook which pops up a
# message box. Pythonw.exe hides the console, so without this
# the application silently fails to load.
#
# We always install this handler, because the expectation is for
# qtconsole to bring up a GUI even if called from the console.
# The old handler is called, so the exception is printed as well.
# If desired, check for pythonw with an additional condition
# (sys.executable.lower().find('pythonw.exe') >= 0).
if os.name == 'nt':
old_excepthook = sys.excepthook
def gui_excepthook(exctype, value, tb):
try:
import ctypes, traceback
MB_ICONERROR = 0x00000010L
title = u'Error starting IPython QtConsole'
msg = u''.join(traceback.format_exception(exctype, value, tb))
ctypes.windll.user32.MessageBoxW(0, msg, title, MB_ICONERROR)
finally:
# Also call the old exception hook to let it do
# its thing too.
old_excepthook(exctype, value, tb)
sys.excepthook = gui_excepthook
# System library imports
from IPython.external.qt import QtCore, QtGui
# Local imports
from IPython.config.application import boolean_flag, catch_config_error
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir
from IPython.lib.kernel import tunnel_to_kernel, find_connection_file
from IPython.frontend.qt.console.frontend_widget import FrontendWidget
from IPython.frontend.qt.console.ipython_widget import IPythonWidget
from IPython.frontend.qt.console.rich_ipython_widget import RichIPythonWidget
from IPython.frontend.qt.console import styles
from IPython.frontend.qt.console.mainwindow import MainWindow
from IPython.frontend.qt.kernelmanager import QtKernelManager
from IPython.utils.path import filefind
from IPython.utils.py3compat import str_to_bytes
from IPython.utils.traitlets import (
Dict, List, Unicode, Integer, CaselessStrEnum, CBool, Any
)
from IPython.zmq.ipkernel import IPKernelApp
from IPython.zmq.session import Session, default_secure
from IPython.zmq.zmqshell import ZMQInteractiveShell
from IPython.frontend.consoleapp import (
IPythonConsoleApp, app_aliases, app_flags, flags, aliases
)
#-----------------------------------------------------------------------------
# Network Constants
#-----------------------------------------------------------------------------
from IPython.utils.localinterfaces import LOCALHOST, LOCAL_IPS
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
_examples = """
ipython qtconsole # start the qtconsole
ipython qtconsole --pylab=inline # start with pylab in inline plotting mode
"""
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
# start with copy of flags
flags = dict(flags)
qt_flags = {
'plain' : ({'IPythonQtConsoleApp' : {'plain' : True}},
"Disable rich text support."),
}
# and app_flags from the Console Mixin
qt_flags.update(app_flags)
# add frontend flags to the full set
flags.update(qt_flags)
# start with copy of front&backend aliases list
aliases = dict(aliases)
qt_aliases = dict(
style = 'IPythonWidget.syntax_style',
stylesheet = 'IPythonQtConsoleApp.stylesheet',
colors = 'ZMQInteractiveShell.colors',
editor = 'IPythonWidget.editor',
paging = 'ConsoleWidget.paging',
)
# and app_aliases from the Console Mixin
qt_aliases.update(app_aliases)
qt_aliases.update({'gui-completion':'ConsoleWidget.gui_completion'})
# add frontend aliases to the full set
aliases.update(qt_aliases)
# get flags&aliases into sets, and remove a couple that
# shouldn't be scrubbed from backend flags:
qt_aliases = set(qt_aliases.keys())
qt_aliases.remove('colors')
qt_flags = set(qt_flags.keys())
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# IPythonQtConsole
#-----------------------------------------------------------------------------
class IPythonQtConsoleApp(BaseIPythonApplication, IPythonConsoleApp):
name = 'ipython-qtconsole'
description = """
The IPython QtConsole.
This launches a Console-style application using Qt. It is not a full
console, in that launched terminal subprocesses will not be able to accept
input.
The QtConsole supports various extra features beyond the Terminal IPython
shell, such as inline plotting with matplotlib, via:
ipython qtconsole --pylab=inline
as well as saving your session as HTML, and printing the output.
"""
examples = _examples
classes = [IPythonWidget] + IPythonConsoleApp.classes
flags = Dict(flags)
aliases = Dict(aliases)
frontend_flags = Any(qt_flags)
frontend_aliases = Any(qt_aliases)
kernel_manager_class = QtKernelManager
stylesheet = Unicode('', config=True,
help="path to a custom CSS stylesheet")
plain = CBool(False, config=True,
help="Use a plaintext widget instead of rich text (plain can't print/save).")
def _plain_changed(self, name, old, new):
kind = 'plain' if new else 'rich'
self.config.ConsoleWidget.kind = kind
if new:
self.widget_factory = IPythonWidget
else:
self.widget_factory = RichIPythonWidget
# the factory for creating a widget
widget_factory = Any(RichIPythonWidget)
def parse_command_line(self, argv=None):
super(IPythonQtConsoleApp, self).parse_command_line(argv)
self.build_kernel_argv(argv)
def new_frontend_master(self):
""" Create and return new frontend attached to new kernel, launched on localhost.
"""
ip = self.ip if self.ip in LOCAL_IPS else LOCALHOST
kernel_manager = self.kernel_manager_class(
ip=ip,
connection_file=self._new_connection_file(),
config=self.config,
)
# start the kernel
kwargs = dict()
kwargs['extra_arguments'] = self.kernel_argv
kernel_manager.start_kernel(**kwargs)
kernel_manager.start_channels()
widget = self.widget_factory(config=self.config,
local_kernel=True)
self.init_colors(widget)
widget.kernel_manager = kernel_manager
widget._existing = False
widget._may_close = True
widget._confirm_exit = self.confirm_exit
return widget
def new_frontend_slave(self, current_widget):
"""Create and return a new frontend attached to an existing kernel.
Parameters
----------
current_widget : IPythonWidget
The IPythonWidget whose kernel this frontend is to share
"""
kernel_manager = self.kernel_manager_class(
connection_file=current_widget.kernel_manager.connection_file,
config = self.config,
)
kernel_manager.load_connection_file()
kernel_manager.start_channels()
widget = self.widget_factory(config=self.config,
local_kernel=False)
self.init_colors(widget)
widget._existing = True
widget._may_close = False
widget._confirm_exit = False
widget.kernel_manager = kernel_manager
return widget
def init_qt_elements(self):
# Create the widget.
self.app = QtGui.QApplication([])
base_path = os.path.abspath(os.path.dirname(__file__))
icon_path = os.path.join(base_path, 'resources', 'icon', 'IPythonConsole.svg')
self.app.icon = QtGui.QIcon(icon_path)
QtGui.QApplication.setWindowIcon(self.app.icon)
local_kernel = (not self.existing) or self.ip in LOCAL_IPS
self.widget = self.widget_factory(config=self.config,
local_kernel=local_kernel)
self.init_colors(self.widget)
self.widget._existing = self.existing
self.widget._may_close = not self.existing
self.widget._confirm_exit = self.confirm_exit
self.widget.kernel_manager = self.kernel_manager
self.window = MainWindow(self.app,
confirm_exit=self.confirm_exit,
new_frontend_factory=self.new_frontend_master,
slave_frontend_factory=self.new_frontend_slave,
)
self.window.log = self.log
self.window.add_tab_with_frontend(self.widget)
self.window.init_menu_bar()
self.window.setWindowTitle('IPython')
def init_colors(self, widget):
"""Configure the coloring of the widget"""
# Note: This will be dramatically simplified when colors
# are removed from the backend.
# parse the colors arg down to current known labels
try:
colors = self.config.ZMQInteractiveShell.colors
except AttributeError:
colors = None
try:
style = self.config.IPythonWidget.syntax_style
except AttributeError:
style = None
try:
sheet = self.config.IPythonWidget.style_sheet
except AttributeError:
sheet = None
# find the value for colors:
if colors:
colors=colors.lower()
if colors in ('lightbg', 'light'):
colors='lightbg'
elif colors in ('dark', 'linux'):
colors='linux'
else:
colors='nocolor'
elif style:
if style=='bw':
colors='nocolor'
elif styles.dark_style(style):
colors='linux'
else:
colors='lightbg'
else:
colors=None
# Configure the style
if style:
widget.style_sheet = styles.sheet_from_template(style, colors)
widget.syntax_style = style
widget._syntax_style_changed()
widget._style_sheet_changed()
elif colors:
# use a default dark/light/bw style
widget.set_default_style(colors=colors)
if self.stylesheet:
# we got an explicit stylesheet
if os.path.isfile(self.stylesheet):
with open(self.stylesheet) as f:
sheet = f.read()
else:
raise IOError("Stylesheet %r not found." % self.stylesheet)
if sheet:
widget.style_sheet = sheet
widget._style_sheet_changed()
def init_signal(self):
"""allow clean shutdown on sigint"""
signal.signal(signal.SIGINT, lambda sig, frame: self.exit(-2))
# need a timer, so that QApplication doesn't block until a real
# Qt event fires (can require mouse movement)
# timer trick from http://stackoverflow.com/q/4938723/938949
timer = QtCore.QTimer()
# Let the interpreter run each 200 ms:
timer.timeout.connect(lambda: None)
timer.start(200)
# hold onto ref, so the timer doesn't get cleaned up
self._sigint_timer = timer
@catch_config_error
def initialize(self, argv=None):
super(IPythonQtConsoleApp, self).initialize(argv)
IPythonConsoleApp.initialize(self,argv)
self.init_qt_elements()
self.init_signal()
def start(self):
# draw the window
self.window.show()
self.window.raise_()
# Start the application main loop.
self.app.exec_()
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
def main():
app = IPythonQtConsoleApp()
app.initialize()
app.start()
if __name__ == '__main__':
main()
| lgpl-3.0 |
DKarev/isolation-forest | clearcut_utils.py | 1 | 2088 | #!/usr/bin/env python
import pandas as pd
import numpy as np
def load_brofile(filename, fields_to_use):
fields = ['ts',
'uid',
'orig_h',
'orig_p',
'resp_h',
'resp_p',
'trans_depth',
'method',
'host',
'uri',
'referrer',
'user_agent',
'request_body_len',
'response_body_len',
'status_code',
'status_msg',
'info_code',
'info_msg',
'filename',
'tags',
'username',
'password',
              'proxied',
              'orig_fuids',
'orig_mime_types',
'resp_fuids',
'resp_mime_types']
df = pd.read_csv(filename,
header=None,
sep='\t',
names=fields,
skiprows=8,
skipfooter=1,
index_col=False,
quotechar=None,
quoting=3,
engine='python')
return df[fields_to_use]
def create_noise_contrast(df, num_samples):
"""
Create a noise contrasted dataframe from a dataframe. We do this
by sampling columns with replacement one at a time from the original
data, and then stitching those columns together into de-correlated rows.
Parameters
----------
df : dataframe
The enhanced HTTP log dataframe
num_samples : int
Number of new rows to create
Returns
-------
newDf : dataframe
"""
newDf = None
for field in list(df):
        # sample the column with replacement
        df1 = df[[field]].sample(n=num_samples, replace=True).reset_index(drop=True)
        # add the new column to the answer (or start a new df if this is the first column)
        if newDf is not None:
            newDf = pd.concat([newDf, df1], axis=1)
        else:
            newDf = df1
return newDf
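# A minimal usage sketch (not part of the original module): the log path, the
# field selection and the sample count below are illustrative assumptions only.
if __name__ == '__main__':
    example_fields = ['host', 'uri', 'user_agent',
                      'request_body_len', 'response_body_len']
    # hypothetical Bro/Zeek HTTP log path
    http_df = load_brofile('http.log', example_fields)
    print('loaded %d rows' % len(http_df))
    # build a decorrelated noise-contrast set with as many rows as the real data
    noise_df = create_noise_contrast(http_df, len(http_df))
    print('noise contrast shape: %d x %d' % noise_df.shape)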
| apache-2.0 |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/backends/backend_macosx.py | 11 | 15754 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib import rcParams
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
class Show(ShowBase):
def mainloop(self):
_macosx.show()
show = Show()
class RendererMac(RendererBase):
"""
The renderer handles drawing/rendering operations. Most of the renderer's
methods forward the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.gc.set_dpi(self.dpi)
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
linewidth = gc.get_linewidth()
gc.draw_path(path, transform, linewidth, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
linewidth = gc.get_linewidth()
gc.draw_markers(marker_path, marker_trans, path, trans, linewidth, rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
if offset_position=='data':
offset_position = True
else:
offset_position = False
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
master_transform = master_transform.get_matrix()
offsetTrans = offsetTrans.get_matrix()
gc.draw_path_collection(master_transform, path_ids, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds,
offset_position)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
gc.draw_quad_mesh(master_transform.get_matrix(),
meshWidth,
meshHeight,
coordinates,
offsets,
offsetTrans.get_matrix(),
facecolors,
antialiased,
edgecolors)
def new_gc(self):
self.gc.save()
self.gc.set_hatch(None)
self.gc._alpha = 1.0
self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA
return self.gc
def draw_gouraud_triangle(self, gc, points, colors, transform):
points = transform.transform(points)
gc.draw_gouraud_triangle(points, colors)
def get_image_magnification(self):
return self.gc.get_image_magnification()
def draw_image(self, gc, x, y, im):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
gc.draw_image(x, y, nrows, ncols, data)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
# todo, handle props, angle, origins
scale = self.gc.get_image_magnification()
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
        im = self.texd.get(key)  # cache of rasterized tex, keyed on text, size, dpi, angle and font config
        if im is None:
            Z = texmanager.get_grey(s, size, self.dpi*scale)
            im = numpy.array(255.0 - Z * 255.0, numpy.uint8)
            self.texd[key] = im
        gc.draw_mathtext(x, y, angle, im)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
scale = self.gc.get_image_magnification()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi*scale, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
weight = prop.get_weight()
style = prop.get_style()
points = prop.get_size_in_points()
size = self.points_to_pixels(points)
gc.draw_text(x, y, six.text_type(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# todo: handle props
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
weight = prop.get_weight()
style = prop.get_style()
points = prop.get_size_in_points()
size = self.points_to_pixels(points)
width, height, descent = self.gc.get_text_width_height_descent(
six.text_type(s), family, size, weight, style)
return width, height, 0.0*descent
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_alpha(self, alpha):
GraphicsContextBase.set_alpha(self, alpha)
_alpha = self.get_alpha()
_macosx.GraphicsContext.set_alpha(self, _alpha, self.get_forced_alpha())
rgb = self.get_rgb()
_macosx.GraphicsContext.set_foreground(self, rgb)
def set_foreground(self, fg, isRGBA=False):
GraphicsContextBase.set_foreground(self, fg, isRGBA)
rgb = self.get_rgb()
_macosx.GraphicsContext.set_foreground(self, rgb)
def set_graylevel(self, fg):
GraphicsContextBase.set_graylevel(self, fg)
_macosx.GraphicsContext.set_graylevel(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class TimerMac(_macosx.Timer, TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses CoreFoundation
run loops for timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
# completely implemented at the C-level (in _macosx.Timer)
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['bmp'] = 'Windows bitmap'
filetypes['jpeg'] = 'JPEG'
filetypes['jpg'] = 'JPEG'
filetypes['gif'] = 'Graphics Interchange Format'
filetypes['tif'] = 'Tagged Image Format File'
filetypes['tiff'] = 'Tagged Image Format File'
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def _print_bitmap(self, filename, *args, **kwargs):
# In backend_bases.py, print_figure changes the dpi of the figure.
# But since we are essentially redrawing the picture, we need the
# original dpi. Pick it up from the renderer.
dpi = kwargs['dpi']
old_dpi = self.figure.dpi
self.figure.dpi = self.renderer.dpi
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
filename = six.text_type(filename)
self.write_bitmap(filename, width, height, dpi)
self.figure.dpi = old_dpi
def print_bmp(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_jpg(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_jpeg(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_tif(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_tiff(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def print_gif(self, filename, *args, **kwargs):
self._print_bitmap(filename, *args, **kwargs)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of :class:`backend_bases.Timer`.
This is useful for getting periodic events through the backend's native
event loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerMac(*args, **kwargs)
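    # Illustrative sketch (not part of the original backend): given a canvas
    # instance, a periodic timer could be set up roughly as follows; the callback
    # name and interval below are hypothetical.
    #
    #     def _tick():
    #         print "tick"
    #     timer = canvas.new_timer(interval=500, callbacks=[(_tick, [], {})])
    #     timer.start()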
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
            if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
if matplotlib.is_interactive():
self.show()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(int(x0), int(y0), int(x1), int(y1))
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self, *args):
filename = _macosx.choose_save_file('Save the figure',
self.canvas.get_default_filename())
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
def dynamic_update(self):
self.canvas.draw_idle()
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasMac
FigureManager = FigureManagerMac
| mit |
JohanComparat/nbody-npt-functions | bin/bin_SMHMr/measure_SMF.py | 1 | 3645 | #import StellarMass
import XrayLuminosity
import numpy as n
from scipy.stats import norm
from scipy.integrate import quad
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import glob
import astropy.io.fits as fits
import os
import time
import sys
print " set up box, and redshift "
#MD 1 hlist_0.74980_SAM_Nb_0.fits
#MD 25 hlist_0.75440_SAM_Nb_10.fits
#duty_cycle = 0.01
bins = n.arange(6,13,0.1)
xb = (bins[1:] + bins[:-1]) / 2.
def measureSMF(snap_name, env='MD10', volume=1000.**3., out_dir="../"):
fileList = n.array(glob.glob(os.path.join(os.environ[env], "work_agn", "out_"+snap_name+"_SAM_Nb_*_Ms.fits")))
fileList.sort()
print fileList
Hall = n.zeros((len(fileList), len(bins)-1))
for ii, fileN in enumerate(fileList):
print fileN
hh = fits.open(fileN)
mass = hh[1].data['stellar_mass_Mo13_mvir']
print mass
selection = (mass>0) # (hh[1].data['stellar_mass_reliable'])&(mass>0)
Hall[ii], bb = n.histogram(hh[1].data['stellar_mass_Mo13_mvir'], bins=bins)
counts = n.sum(Hall, axis=0)
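    # convert the raw counts into a number density: the 0.6777**3 factor is
    # presumably the little-h**3 rescaling, divided by the bin width in log10(Ms),
    # the box volume and ln(10)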
dN_dVdlogM = counts*0.6777**3./(bins[1:]-bins[:-1])/volume/n.log(10)
data = n.transpose([bins[:-1], bins[1:], counts, dN_dVdlogM ])
n.savetxt(os.path.join(out_dir, "out_"+snap_name+"_SMF.txt"), data, header = "logMs_low logMs_up counts dN_dVdlogM")
def measureSMF_tracer(snap_name, tracer_name, env='MD10', volume=1000.**3., out_dir="../"):
out_file = os.path.join(out_dir, "out_"+snap_name+"_"+tracer_name+"_SMF.txt")
#if os.path.isfile(out_file)==False:
fileList = n.array(glob.glob(os.path.join(os.environ[env], "work_agn", "out_"+snap_name+"_SAM_Nb_*_Ms.fits")))
fileList.sort()
fileList_T = n.array(glob.glob(os.path.join(os.environ[env], "work_agn", "out_"+snap_name+"_SAM_Nb_*_"+tracer_name+".fits")))
fileList_T.sort()
print fileList, fileList_T
if len(fileList_T)==len(fileList):
Hall = n.zeros((len(fileList), len(bins)-1))
for ii, fileN in enumerate(fileList):
print fileN
hh = fits.open(fileN)
lines = fits.open(fileList_T[ii])[1].data['line_number']
mass = hh[1].data['stellar_mass_Mo13_mvir'][lines]
Hall[ii], bb = n.histogram(mass, bins=bins)
counts = n.sum(Hall, axis=0)
dN_dVdlogM = counts*0.6777**3./(bins[1:]-bins[:-1])/volume/n.log(10)
data = n.transpose([bins[:-1], bins[1:], counts, dN_dVdlogM ])
n.savetxt(out_file, data, header = "logMs_low logMs_up counts dN_dVdlogM")
# open the output file_type
summ = fits.open(os.path.join(os.environ["MD10"], 'output_MD_1.0Gpc.fits'))[1].data
out_dir = os.path.join(os.path.join(os.environ['MD10'], "duty_cycle"))
for el in summ[::-1]:
print el
measureSMF(snap_name=el["snap_name"], env='MD10', volume=1000.**3., out_dir = out_dir)
#measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S5_BCG", env='MD10', volume=1000.**3., out_dir = out_dir)
#measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S5_GAL", env='MD10', volume=1000.**3., out_dir = out_dir)
#measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S6_AGN", env='MD10', volume=1000.**3., out_dir = out_dir)
#measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S8_BG1", env='MD10', volume=1000.**3., out_dir = out_dir)
#measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S8_BG2", env='MD10', volume=1000.**3., out_dir = out_dir)
#measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S8_ELG", env='MD10', volume=1000.**3., out_dir = out_dir)
#measureSMF_tracer(snap_name=el["snap_name"], tracer_name="4MOST_S8_QSO", env='MD10', volume=1000.**3., out_dir = out_dir)
| cc0-1.0 |
MaxHalford/Prince | setup.py | 1 | 3727 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pip install twine
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'prince'
DESCRIPTION = 'Statistical factor analysis in Python'
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
URL = 'https://github.com/MaxHalford/prince'
EMAIL = '[email protected]'
AUTHOR = 'Max Halford'
REQUIRES_PYTHON = '>=3.4.0'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
'matplotlib>=3.0.2',
'numpy>=1.16.1',
'pandas>=0.24.0',
'scipy>=1.1.0',
'scikit-learn>=0.20.1'
]
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
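# UploadCommand is registered through ``cmdclass`` at the bottom of this file,
# so a release can be cut with: python setup.py upload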
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
# entry_points={
# 'console_scripts': ['mycli=mymodule:cli'],
# },
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| mit |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
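# Worked example (illustrative): two biclusters that share 2 of 3 rows and both of
# 2 columns give intersection = 2 * 2 = 4, a_size = 3 * 2 = 6, b_size = 2 * 2 = 4,
# hence a Jaccard coefficient of 4 / (6 + 4 - 4) = 2/3.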
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
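# Minimal usage sketch (illustrative, not part of the original module): comparing a
# set of two biclusters over a 3x2 matrix with itself yields a perfect score.
#
#     import numpy as np
#     rows = np.array([[True, True, False], [False, False, True]])
#     cols = np.array([[True, False], [False, True]])
#     consensus_score((rows, cols), (rows, cols))  # -> 1.0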
| mit |
pv/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
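        # i.e. rows {0, 1, 4} and columns {2, 3} of the 5x4 test matrix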
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
labels = model._project_and_cluster(data, vectors,
n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
free2tedy/Licenta | gui/main.py | 1 | 27389 | from threading import Thread
from PyQt5.QtCore import Qt, pyqtSignal, QDate
from PyQt5.QtWidgets import QPushButton, QApplication, QWidget, QDesktopWidget
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QLabel, QGroupBox
from PyQt5.QtWidgets import QSizePolicy, QLineEdit, QDialog, QListWidget
from PyQt5.QtWidgets import QGridLayout, QComboBox, QDateEdit
from articol import Article
from MySQLdb import IntegrityError
from crawler.settings import mysql_conn
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import log, signals
from crawler.spiders.hotnewsPoliticSumar import HotnewspoliticSpiderSumar
from scrapy.utils.project import get_project_settings
import re
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
import plotly.plotly as py
from plotly.graph_objs import *
from PIL import Image
classifiers = {
'Multinomial NB': MultinomialNB(),
'Gaussian NB': GaussianNB(),
'Bernoulli NB': BernoulliNB(),
'SVM': SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, n_iter=5, random_state=42)
}
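# scikit-learn estimators offered in the GUI; the one selected in the combo box
# becomes the final step of the text-classification pipeline built in
# classifyAllAssociations()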
monthIntToString = {
1: 'Ian',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'Mai',
6: 'Iun',
7: 'Iul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Noi',
12: 'Dec'
}
reportTypes = [
"Opinions/Entity Selected",
"Opinions/All Entities",
"Appearances/Entity Selected",
"Appearances/All Entities"
]
class MainWindow(QWidget):
articleInfoUpdate = pyqtSignal()
entityUpdate = pyqtSignal()
crawlerUpdate = pyqtSignal()
def __init__(self):
super(MainWindow, self).__init__()
self.initUI()
self.articleInfoUpdate.connect(self.updateArticleInfo)
self.entityUpdate.connect(self.updateEntityList)
def initUI(self):
self.setGeometry(0, 0, 500, 700)
self.center()
self.setWindowTitle('PView')
mainLayout = QVBoxLayout()
self.createArticleInfoBox()
self.createViewArticleBox()
self.createEntityBox()
self.createReportBox()
self.createDatabaseBox()
mainLayout.addWidget(self.infoBox)
mainLayout.addWidget(self.viewArticleBox)
mainLayout.addWidget(self.entityBox)
mainLayout.addWidget(self.raportBox)
mainLayout.addWidget(self.databaseBox)
self.setLayout(mainLayout)
self.show()
def createArticleInfoBox(self):
self.articleCount = self.selectCountArticles()
entityCount = self.selectCountEntities()
associationsCount = self.selectCountAssociations()
classifiedCount = self.selectCountClassifiedAssociations()
label = "Number of articles: " + str(self.articleCount)
self.articleCountLabel = QLabel(label)
label = "Number of entities: " + str(entityCount)
self.entitiesCountLabel = QLabel(label)
label = "Number of associations: " + str(associationsCount)
self.associationsCountLabel = QLabel(label)
label = "Number of classified associations: " + str(classifiedCount)
self.classifiedCountLabel = QLabel(label)
layout = QVBoxLayout()
layout.addWidget(self.articleCountLabel)
layout.addWidget(self.entitiesCountLabel)
layout.addWidget(self.associationsCountLabel)
layout.addWidget(self.classifiedCountLabel)
self.infoBox = QGroupBox("Statistics")
self.infoBox.setLayout(layout)
def createCrawlerBox(self):
self.crawlButton = QPushButton("Crawl")
self.crawlButton.setFocusPolicy(Qt.NoFocus)
self.websiteList = QComboBox()
self.websiteList.addItem("HotNews")
layout = QGridLayout()
layout.addWidget(self.websiteList, 0, 0, 1, 1)
layout.addWidget(self.crawlButton, 0, 1, 1, 1)
layout.setColumnStretch(0, 1)
layout.setColumnStretch(1, 1)
self.crawlerBox = QGroupBox("Crawler")
self.crawlerBox.setLayout(layout)
def createViewArticleBox(self):
self.articleNumberLineEdit = QLineEdit("")
self.articleNumberLineEdit.setAlignment(Qt.AlignHCenter)
self.viewArticleButton = QPushButton("Open")
self.viewArticleButton.clicked.connect(self.viewArticle)
layout = QGridLayout()
layout.addWidget(self.articleNumberLineEdit, 0, 0, 1, 1)
layout.addWidget(self.viewArticleButton, 0, 1, 1, 1)
layout.setColumnStretch(0, 1)
layout.setColumnStretch(1, 1)
self.viewArticleBox = QGroupBox("View Article")
self.viewArticleBox.setLayout(layout)
def createReportBox(self):
minDate, maxDate = self.selectMinAndMaxDate()
minDate = QDate(minDate.year, minDate.month, minDate.day)
maxDate = QDate(maxDate.year, maxDate.month, maxDate.day)
self.fromDateEdit = QDateEdit()
self.fromDateEdit.setDate(minDate)
self.fromDateEdit.setDisplayFormat('d MMM yyyy')
self.fromDateEdit.setAlignment(Qt.AlignHCenter)
self.toDateEdit = QDateEdit()
self.toDateEdit.setDate(maxDate)
self.toDateEdit.setDisplayFormat('d MMM yyyy')
self.toDateEdit.setAlignment(Qt.AlignHCenter)
self.reportTypeComboBox = QComboBox()
for item in reportTypes:
self.reportTypeComboBox.addItem(item)
monthlyButton = QPushButton("View")
monthlyButton.clicked.connect(self.createReport)
layout = QGridLayout()
layout.addWidget(self.fromDateEdit, 0, 0, 1, 1)
layout.addWidget(self.toDateEdit, 0, 1, 1, 1)
layout.addWidget(self.reportTypeComboBox, 1, 0, 1, 1)
layout.addWidget(monthlyButton, 1, 1, 1, 1)
layout.setColumnStretch(0, 1)
layout.setColumnStretch(1, 1)
self.raportBox = QGroupBox("Charts")
self.raportBox.setLayout(layout)
def createEntityBox(self):
rows = self.selectCountEntities()
self.entityList = QListWidget()
entities = self.selectAllEntities()
for entity in entities:
self.doAssociationsForEntity(entity[1])
self.entityList.addItem(entity[1])
addButton = QPushButton("Add")
addButton.clicked.connect(self.addEntity)
removeButton = QPushButton("Delete")
removeButton.clicked.connect(self.removeEntity)
self.addEntityLineEdit = QLineEdit("")
viewArticlesButton = QPushButton("View articles")
viewArticlesButton.clicked.connect(self.viewArticleByEntity)
self.algorithmComboBox = QComboBox()
for key in classifiers.keys():
self.algorithmComboBox.addItem(key)
classifyButton = QPushButton("Classify")
classifyButton.clicked.connect(self.classifyAllAssociations)
layout = QGridLayout()
layout.addWidget(self.entityList, 0, 0, 1, 4)
layout.addWidget(self.addEntityLineEdit, 1, 0, 1, 2)
layout.addWidget(addButton, 1, 2, 1, 1)
layout.addWidget(removeButton, 1, 3, 1, 1)
layout.addWidget(viewArticlesButton, 2, 0, 1, 4)
layout.addWidget(self.algorithmComboBox, 3, 0, 1, 2)
layout.addWidget(classifyButton, 3, 2, 1, 2)
layout.setColumnStretch(0, 1)
layout.setColumnStretch(1, 1)
layout.setColumnStretch(2, 1)
layout.setColumnStretch(3, 1)
self.entityBox = QGroupBox("Entities")
self.entityBox.setLayout(layout)
def createDatabaseBox(self):
deleteClassificationsButton = QPushButton("Remove all classifications")
deleteClassificationsButton.clicked.connect(self.clearAllCalculatedPolarities)
deleteEntitiesButton = QPushButton("Remove all entities")
deleteEntitiesButton.clicked.connect(self.deleteAllEntities)
deleteAssociationsButton = QPushButton("Remove all associations")
deleteAssociationsButton.clicked.connect(self.deleteAllAssociations)
layout = QVBoxLayout()
layout.addWidget(deleteClassificationsButton)
layout.addWidget(deleteAssociationsButton)
layout.addWidget(deleteEntitiesButton)
self.databaseBox = QGroupBox("Database")
self.databaseBox.setLayout(layout)
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def monthsBetweenDates(self, fromDate, toDate):
curDate = QDate(fromDate)
months =[]
while curDate < toDate:
months.append(curDate)
curDate = curDate.addMonths(1)
return months
def makeMonthlyPolarityChart(self, entities, fromDate, toDate):
cursor = mysql_conn.cursor()
chartData = []
for (entityId, entity) in entities:
monthlyPol = self.selectAllPolaritiesForEntity(entityId, fromDate, toDate)
trace0=Bar(
x = [self.monthYearLabel(month) for (month, _, _) in monthlyPol],
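                # percentage of positive polarities per month (1 presumably encodes a
                # positive opinion); the +1 in the denominator guards against empty months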
y = [(0.0 + rows.count(1L)) / (l+1) * 100 for (_, l, rows) in monthlyPol],
name = entity,
marker = Marker(
color = 'rgb(204,204,204)',
opacity = 0.5,
),
)
chartData.append(trace0)
chartData = Data(chartData)
layout = Layout(
xaxis=XAxis(
#set x-axis' labels direction at 45 degree angle
tickangle=-45,
),
barmode='group',
)
fig = Figure(data = chartData, layout = layout)
py.image.save_as({'data': chartData}, "polarities.png")
img = Image.open("polarities.png")
img.show()
def makeMonthlyAppearanceChart(self, entities, fromDate, toDate):
cursor = mysql_conn.cursor()
chartData = []
for (entityId, entity) in entities:
monthlyApp = self.selectCountAssociationsForEntityBetweenDates(entityId, fromDate, toDate)
trace0=Bar(
x = [self.monthYearLabel(month) for (month, _) in monthlyApp],
y = [count for (_, count) in monthlyApp],
name = entity,
marker = Marker(
color = 'rgb(204,204,204)',
opacity = 0.5,
),
)
chartData.append(trace0)
chartData = Data(chartData)
layout = Layout(
xaxis=XAxis(
#set x-axis' labels direction at 45 degree angle
tickangle=-45,
),
barmode='group',
)
fig = Figure(data = chartData, layout = layout)
py.image.save_as({'data': chartData}, "appearances.png")
img = Image.open("appearances.png")
img.show()
def getStringDate(self, date):
sDate = str(date.year())
sDate += '-'+str(date.month())
sDate += '-'+'01'
time = '00:00:00'
return sDate + ' ' + time
def monthYearLabel(self, date):
label = monthIntToString[date.month()] + ' '
label += str(date.year())
return label
def createReport(self):
reportType = self.reportTypeComboBox.currentText()
fromDate = self.fromDateEdit.date()
toDate = self.toDateEdit.date()
entities = []
if "All entities" in reportType:
entities = self.selectAllEntities()
else:
selected = self.entityList.selectedItems()
if len(selected) == 1:
entity = selected[0].text()
entities = [(self.selectEntityId(entity), entity)]
if len(entities) > 0:
if "Opinions" in reportType:
self.makeMonthlyPolarityChart(entities, fromDate, toDate)
else:
print entities
self.makeMonthlyAppearanceChart(entities, fromDate, toDate)
def viewArticle(self):
try:
articleId = int(self.articleNumberLineEdit.text())
if articleId > 0 and articleId < self.articleCount:
self.viewArticleButton.setEnabled(False)
self.articleNumberLineEdit.setDisabled(True)
articleList = [i+1 for i in xrange(self.articleCount)]
articleView = Article(articleId-1, articleList, parentW=self)
articleView.exec_()
self.viewArticleButton.setEnabled(True)
self.articleNumberLineEdit.setDisabled(False)
except ValueError:
print "Invalid article id"
def viewArticleByEntity(self):
selected = self.entityList.selectedItems()
if len(selected) == 1:
articles = self.selectAllArticlesByEntity(selected[0].text())
articleList = [a[0] for a in articles]
articleView = Article(0, articleList, shuffle_=True, parentW=self)
articleView.exec_()
def addEntity(self):
newEntity = self.addEntityLineEdit.text().strip()
newEntity = re.sub(r' +', ' ', newEntity)
cursor = mysql_conn.cursor()
if len(newEntity) != 0:
selectStmt = """SELECT *
FROM entities
WHERE entity=%s"""
data = (newEntity,)
cursor.execute(selectStmt, data)
rows = cursor.fetchall()
if len(rows) == 0:
insertStmt = """INSERT INTO entities (entity)
VALUES (%s)"""
data = (newEntity,)
cursor.execute(insertStmt, data)
cursor.execute("""COMMIT""")
self.entityUpdate.emit()
self.doAssociationsForEntity(newEntity)
self.addEntityLineEdit.setText("")
def removeEntity(self):
selected = self.entityList.selectedItems()
cursor = mysql_conn.cursor()
for item in selected:
            self.deleteAssociationsForEntity(item.text())
selectStmt = """SELECT entity_id
FROM entities
WHERE entity=%s"""
data = (item.text(),)
cursor.execute(selectStmt, data)
entityId = cursor.fetchone()
deleteStmt = """DELETE FROM entities
WHERE entity_id=%s"""
data = (entityId[0],)
cursor.execute(deleteStmt, data)
cursor.execute("""COMMIT""")
self.entityUpdate.emit()
def updateEntityList(self):
self.entityList.clear()
entities = self.selectAllEntities()
for entity in entities:
self.entityList.addItem(entity[1])
label = "Number of entities: " + str(len(entities))
self.entitiesCountLabel.setText(label)
def updateArticleInfo(self):
self.articleCount = self.selectCountArticles()
entityCount = self.selectCountEntities()
associationsCount = self.selectCountAssociations()
classifiedCount = self.selectCountClassifiedAssociations()
label = "Number of articles: " + str(self.articleCount)
self.articleCountLabel.setText(label)
label = "Number of entities: " + str(entityCount)
self.entitiesCountLabel.setText(label)
label = "Number of classified associations: " + str(classifiedCount)
self.classifiedCountLabel.setText(label)
label = "Number of associations: " + str(associationsCount)
self.associationsCountLabel.setText(label)
def classifyAllAssociations(self):
cursor = mysql_conn.cursor()
entities = self.selectAllEntities()
for (entityId, entity) in entities:
manualPol = self.selectManualPolaritiesForEntity(entityId)
trainingData = [self.selectArticle(id_)[4] for (id_, _) in manualPol]
trainingTarget = [polarity for (_, polarity) in manualPol]
algorithm = self.algorithmComboBox.currentText()
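            # bag-of-words counts -> tf-idf weights -> the classifier chosen above,
            # trained on the manually labelled associations for this entity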
textClf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', classifiers[algorithm]),
])
textClf.fit(trainingData, trainingTarget)
# select all articles associated with entity that need to be classified
selectStmt = """SELECT article_id
FROM assocEntityArticle
WHERE polarity_manual IS NULL
AND polarity_calculated IS NULL
AND entity_id=%s"""
data = (entityId,)
cursor.execute(selectStmt, data)
ids = cursor.fetchall()
if len(ids) > 0:
ids = [a[0] for a in ids]
testData = [self.selectArticle(id_)[4] for id_ in ids]
predicted = textClf.predict(testData)
print [x for x in predicted].count(1)
updateData = zip(predicted, ids)
updateData = [(polarity, entityId, id_) for (polarity, id_) in updateData]
updateStmt = """UPDATE assocEntityArticle
SET polarity_calculated=%s
WHERE entity_id=%s AND article_id=%s"""
cursor.executemany(updateStmt, updateData)
cursor.execute("""COMMIT""")
self.articleInfoUpdate.emit()
def selectArticle(self, articleId):
cursor = mysql_conn.cursor()
selectStmt = """SELECT *
FROM articles
WHERE article_id=%s"""
data = (articleId,)
cursor.execute(selectStmt, data)
row = cursor.fetchone()
return row
def selectEntityId(self, entity):
cursor = mysql_conn.cursor()
selectStmt = """SELECT entity_id
FROM entities
WHERE entity=%s"""
data = (entity,)
cursor.execute(selectStmt, data)
entityId = cursor.fetchone()[0]
return entityId
def selectAllArticlesByEntity(self, entity):
cursor = mysql_conn.cursor()
selectStmt = """SELECT *
FROM articles
WHERE content LIKE %s"""
data = ("%" + entity + "%",)
cursor.execute(selectStmt, data)
rows = cursor.fetchall()
return rows
def selectAllEntities(self):
cursor = mysql_conn.cursor()
selectStmt = """SELECT *
FROM entities"""
cursor.execute(selectStmt)
rows = cursor.fetchall()
return rows
def selectMinAndMaxDate(self):
cursor = mysql_conn.cursor()
selectStmt = """SELECT MIN(date), MAX(date)
FROM articles"""
cursor.execute(selectStmt)
row = cursor.fetchone()
return row
def selectCountArticles(self):
cursor = mysql_conn.cursor()
selectStmt = """SELECT count(*)
FROM articles"""
cursor.execute(selectStmt)
row = cursor.fetchone()
return row[0]
def selectCountAuthors(self):
cursor = mysql_conn.cursor()
selectStmt = """SELECT count(*)
FROM authors"""
cursor.execute(selectStmt)
row = cursor.fetchone()
return row[0]
def selectCountEntities(self):
cursor = mysql_conn.cursor()
selectStmt = """SELECT count(*)
FROM entities"""
cursor.execute(selectStmt)
row = cursor.fetchone()
return row[0]
def selectCountAssociations(self):
cursor = mysql_conn.cursor()
selectStmt = """SELECT count(*)
FROM assocEntityArticle"""
cursor.execute(selectStmt)
row = cursor.fetchone()
return row[0]
def selectCountAssociationsForEntityBetweenDates(self, entityId, fromDate, toDate):
cursor = mysql_conn.cursor()
months = self.monthsBetweenDates(fromDate, toDate)
selectStmt = """SELECT count(*)
FROM assocEntityArticle a, articles b
WHERE a.article_id = b.article_id
AND b.date BETWEEN %s AND %s
AND a.entity_id=%s"""
associations = []
if len(months) != 0:
for month in months:
fromDateString = self.getStringDate(month)
toDateString = self.getStringDate(month.addMonths(1))
data = (fromDateString, toDateString, entityId)
cursor.execute(selectStmt, data)
count = cursor.fetchone()[0]
associations.append((month, count))
return associations
def selectCountClassifiedAssociations(self):
cursor = mysql_conn.cursor()
selectStmt = """SELECT count(*)
FROM assocEntityArticle
WHERE polarity_calculated IS NOT NULL
OR polarity_manual IS NOT NULL"""
cursor.execute(selectStmt)
row = cursor.fetchone()
return row[0]
def selectManualPolaritiesForEntity(self, entityId):
cursor = mysql_conn.cursor()
selectStmt = """SELECT article_id, polarity_manual
FROM assocEntityArticle
WHERE polarity_manual IS NOT NULL
AND entity_id=%s"""
data = (entityId,)
cursor.execute(selectStmt, data)
rows = cursor.fetchall()
return rows
def selectAllPolaritiesForEntity(self, entityId, fromDate, toDate):
cursor = mysql_conn.cursor()
months = self.monthsBetweenDates(fromDate, toDate)
selectStmt = """SELECT a.polarity_manual, a.polarity_calculated
FROM assocEntityArticle a, articles b
WHERE (a.polarity_manual IS NOT NULL
OR a.polarity_calculated IS NOT NULL)
AND a.article_id = b.article_id
AND b.date BETWEEN %s AND %s
AND a.entity_id=%s"""
polarities = []
if len(months) != 0:
for month in months:
fromDateString = self.getStringDate(month)
toDateString = self.getStringDate(month.addMonths(1))
data = (fromDateString, toDateString, entityId)
cursor.execute(selectStmt, data)
rows = cursor.fetchall()
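                # prefer the manual polarity when it is set (and non-zero), otherwise
                # fall back to the calculated one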
rows = [a or b for a, b in rows]
polarities.append((month, len(rows), rows))
return polarities
def doAssociationsForEntity(self, entity):
cursor = mysql_conn.cursor()
# select entity_id for entity given as parameter
entityId = self.selectEntityId(entity)
# select list of article_id for which associations exist
# in database for entity given as param
selectStmt = """SELECT article_id
FROM assocEntityArticle
WHERE entity_id=%s"""
data = (entityId,)
cursor.execute(selectStmt, data)
articleIdsInDB = cursor.fetchall()
articleIdsInDB = [pair[0] for pair in articleIdsInDB]
# select all articles that contain entity in content
selectStmt = """SELECT article_id
FROM articles
WHERE content LIKE %s"""
data = ("%" + entity + "%",)
cursor.execute(selectStmt, data)
rows = cursor.fetchall()
rows = [pair[0] for pair in rows]
# find articles for which associations don't exist in the database
diff = list(set(rows) - set(articleIdsInDB))
if len(diff) != 0:
insertStmt = """INSERT INTO assocEntityArticle (article_id, entity_id)
VALUES (%s, %s)"""
data = [(articleId, entityId) for articleId in diff]
cursor.executemany(insertStmt, data)
cursor.execute("""COMMIT""")
self.articleInfoUpdate.emit()
    def deleteAssociationsForEntity(self, entity):
cursor = mysql_conn.cursor()
selectStmt = """SELECT entity_id
FROM entities
WHERE entity=%s"""
data = (entity,)
cursor.execute(selectStmt, data)
entityId = cursor.fetchone()[0]
deleteStmt = """DELETE FROM assocEntityArticle
WHERE entity_id=%s"""
data = (entityId,)
cursor.execute(deleteStmt, data)
cursor.execute("""COMMIT""")
self.articleInfoUpdate.emit()
def doAllAssociations(self):
cursor = mysql_conn.cursor()
entities = self.selectAllEntities()
for entity in entities:
            self.doAssociationsForEntity(entity[1])
self.articleInfoUpdate.emit()
def deleteAllAssociations(self):
cursor = mysql_conn.cursor()
deleteStmt = """DELETE FROM assocEntityArticle
WHERE article_id > 0"""
cursor.execute(deleteStmt)
cursor.execute("""COMMIT""")
self.articleInfoUpdate.emit()
def clearAllCalculatedPolarities(self):
cursor = mysql_conn.cursor()
updateStmt = """UPDATE assocEntityArticle
SET polarity_calculated=%s
WHERE polarity_calculated IS NOT NULL"""
data = (None,)
cursor.execute(updateStmt, data)
cursor.execute("""COMMIT""")
self.articleInfoUpdate.emit()
def deleteAllArticles(self):
try:
cursor = mysql_conn.cursor()
deleteStmt = """DELETE FROM articles
WHERE article_id > 0"""
cursor.execute(deleteStmt)
alterTableStmt = """ALTER TABLE articles AUTO_INCREMENT = 1"""
cursor.execute(alterTableStmt)
cursor.execute("""COMMIT""")
self.articleInfoUpdate.emit()
except IntegrityError:
pass
def deleteAllAuthors(self):
cursor = mysql_conn.cursor()
deleteStmt = """DELETE FROM authors
WHERE author_id > 0"""
cursor.execute(deleteStmt)
alterTableStmt = """ALTER TABLE authors AUTO_INCREMENT = 1"""
cursor.execute(alterTableStmt)
cursor.execute("""COMMIT""")
def deleteAllArticlesAndAuthors(self):
self.deleteAllArticles()
self.deleteAllAuthors()
def deleteAllEntities(self):
cursor = mysql_conn.cursor()
deleteStmt = """DELETE FROM entities
WHERE entity_id > 0"""
cursor.execute(deleteStmt)
alterTableStmt = """ALTER TABLE entities AUTO_INCREMENT = 1"""
cursor.execute(alterTableStmt)
cursor.execute("""COMMIT""")
self.articleInfoUpdate.emit()
self.entityUpdate.emit()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MainWindow()
sys.exit(app.exec_())
| gpl-2.0 |
akrizhevsky/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from StringIO import StringIO  # used below to build in-memory PNG buffers for the prediction tarball
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
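        # bigpic tiles the selected filters into a single image with a one-pixel
        # border between tiles; channels are laid out side by side unless they are
        # combined into an RGB image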
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
Alexx-G/openface | evaluation/lfw-classification-unknown.py | 4 | 19062 | #!/usr/bin/env python2
#
# This file can be used to benchmark different classifiers
# on the LFW dataset with known and unknown datasets.
# More info at: https://github.com/cmusatyalab/openface/issues/144
# Brandon Amos & Vijayenthiran Subramaniam
# 2016/06/28
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
start = time.time()
import argparse
import cv2
import os
import pickle
import shutil # For copy images
import errno
import sys
import operator
from operator import itemgetter
import numpy as np
np.set_printoptions(precision=2)
import pandas as pd
import openface
from sklearn.pipeline import Pipeline
from sklearn.lda import LDA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.mixture import GMM
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from nolearn.dbn import DBN
import multiprocessing
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
sys.path.append('./util/')
align_dlib = __import__('align-dlib')
# The list of available classifiers. The list is used in train() and
# inferFromTest() functions.
clfChoices = [
'LinearSvm',
'GMM',
'RadialSvm',
'DecisionTree',
'GaussianNB',
'DBN']
def train(args):
start = time.time()
for clfChoice in clfChoices:
print("Loading embeddings.")
fname = "{}/labels.csv".format(args.workDir)
labels = pd.read_csv(fname, header=None).as_matrix()[:, 1]
labels = map(itemgetter(1),
map(os.path.split,
map(os.path.dirname, labels))) # Get the directory.
fname = "{}/reps.csv".format(args.workDir)
embeddings = pd.read_csv(fname, header=None).as_matrix()
le = LabelEncoder().fit(labels)
labelsNum = le.transform(labels)
nClasses = len(le.classes_)
print("Training for {} classes.".format(nClasses))
if clfChoice == 'LinearSvm':
clf = SVC(C=1, kernel='linear', probability=True)
elif clfChoice == 'GMM': # Doesn't work best
clf = GMM(n_components=nClasses)
# ref:
# http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html#example-classification-plot-classifier-comparison-py
elif clfChoice == 'RadialSvm': # Radial Basis Function kernel
# works better with C = 1 and gamma = 2
clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
elif clfChoice == 'DecisionTree': # Doesn't work best
clf = DecisionTreeClassifier(max_depth=20)
elif clfChoice == 'GaussianNB':
clf = GaussianNB()
# ref: https://jessesw.com/Deep-Learning/
elif clfChoice == 'DBN':
if args.verbose:
verbose = 1
else:
verbose = 0
clf = DBN([embeddings.shape[1], 500, labelsNum[-1:][0] + 1], # i/p nodes, hidden nodes, o/p nodes
learn_rates=0.3,
# Smaller steps mean a possibly more accurate result, but the
# training will take longer
learn_rate_decays=0.9,
# a factor the initial learning rate will be multiplied by
# after each iteration of the training
epochs=300, # number of iterations
# dropouts = 0.25, # Express the percentage of nodes that
# will be randomly dropped as a decimal.
verbose=verbose)
if args.ldaDim > 0:
clf_final = clf
clf = Pipeline([('lda', LDA(n_components=args.ldaDim)),
('clf', clf_final)])
clf.fit(embeddings, labelsNum)
fName = os.path.join(args.workDir, clfChoice + ".pkl")
print("Saving classifier to '{}'".format(fName))
with open(fName, 'w') as f:
pickle.dump((le, clf), f)
if args.verbose:
print(
"Training and saving the classifiers took {} seconds.".format(
time.time() - start))
def getRep(imgPath):
start = time.time()
bgrImg = cv2.imread(imgPath)
if bgrImg is None:
raise Exception("Unable to load image: {}".format(imgPath))
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if args.verbose:
print(" + Original size: {}".format(rgbImg.shape))
if args.verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
bb = align.getLargestFaceBoundingBox(rgbImg)
if (bb is None):
raise Exception("Unable to find a face: {}".format(imgPath))
if args.verbose:
print("Face detection took {} seconds.".format(time.time() - start))
start = time.time()
alignedFace = align.align(
args.imgDim,
rgbImg,
bb,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
raise Exception("Unable to align image: {}".format(imgPath))
if args.verbose:
print("Alignment took {} seconds.".format(time.time() - start))
start = time.time()
rep = net.forward(alignedFace)
if args.verbose:
print(
"Neural network forward pass took {} seconds.".format(
time.time() - start))
return rep
def inferFromTest(args):
for clfChoice in clfChoices:
print "==============="
print "Using the classifier: " + clfChoice
with open(os.path.join(args.featureFolder[0], clfChoice + ".pkl"), 'r') as f_clf:
(le, clf) = pickle.load(f_clf)
correctPrediction = 0
inCorrectPrediction = 0
sumConfidence = 0.0
testSet = [
os.path.join(
args.testFolder[0], f) for f in os.listdir(
args.testFolder[0]) if not f.endswith('.DS_Store')]
for personSet in testSet:
personImages = [os.path.join(personSet, f) for f in os.listdir(
personSet) if not f.endswith('.DS_Store')]
for img in personImages:
if args.verbose:
print("\n=== {} ===".format(img.split('/')[-1:][0]))
try:
rep = getRep(img).reshape(1, -1)
except Exception as e:
print e
continue
start = time.time()
predictions = clf.predict_proba(rep).ravel()
maxI = np.argmax(predictions)
person = le.inverse_transform(maxI)
confidence = predictions[maxI]
if args.verbose:
print(
"Prediction took {} seconds.".format(
time.time() - start))
if args.verbose:
print(
"Predict {} with {:.2f} confidence.".format(
person, confidence))
sumConfidence += confidence
if confidence <= args.threshold and args.unknown:
person = "_unknown"
if (img.split('/')[-1:][0].split('.')[0][:-5] == person and not args.unknown) or (person == "_unknown" and args.unknown):
correctPrediction += 1
else:
inCorrectPrediction += 1
if isinstance(clf, GMM) and args.verbose:
dist = np.linalg.norm(rep - clf.means_[maxI])
print(" + Distance from the mean: {}".format(dist))
print "Results for the classifier: " + clfChoice
print "Correct Prediction :" + str(correctPrediction)
print "In-correct Prediction: " + str(inCorrectPrediction)
print "Accuracy :" + str(float(correctPrediction) / (correctPrediction + inCorrectPrediction))
print "Avg Confidence: " + str(float(sumConfidence) / (correctPrediction + inCorrectPrediction))
def preprocess(args):
start = time.time()
lfwPath = args.lfwDir
destPath = args.featuresDir
fullFaceDirectory = [os.path.join(lfwPath, f) for f in os.listdir(
lfwPath) if not f.endswith('.DS_Store')] # .DS_Store for the OS X
noOfImages = []
folderName = []
for folder in fullFaceDirectory:
try:
noOfImages.append(len(os.listdir(folder)))
folderName.append(folder.split('/')[-1:][0])
# print folder.split('/')[-1:][0] +": " +
# str(len(os.listdir(folder)))
except:
pass
# Sorting
noOfImages_sorted, folderName_sorted = zip(
*sorted(zip(noOfImages, folderName), key=operator.itemgetter(0), reverse=True))
with open(os.path.join(destPath, "List_of_folders_and_number_of_images.txt"), "w") as text_file:
for f, n in zip(folderName_sorted, noOfImages_sorted):
text_file.write("{} : {} \n".format(f, n))
if args.verbose:
print "Sorting lfw dataset took {} seconds.".format(time.time() - start)
start = time.time()
# Copy known train dataset
for i in range(int(args.rangeOfPeople.split(':')[0]), int(
args.rangeOfPeople.split(':')[1])):
src = os.path.join(lfwPath, folderName_sorted[i])
try:
destFolder = os.path.join(
destPath, 'train_known_raw', folderName_sorted[i])
shutil.copytree(src, destFolder)
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
shutil.copy(src, destFolder)
else:
if args.verbose:
print('Directory not copied. Error: %s' % e)
if args.verbose:
print "Copying train dataset from lfw took {} seconds.".format(time.time() - start)
start = time.time()
# Take 10% images from train dataset as test dataset for known
train_known_raw = [
os.path.join(
os.path.join(
destPath,
'train_known_raw'),
f) for f in os.listdir(
os.path.join(
destPath,
'train_known_raw')) if not f.endswith('.DS_Store')] # .DS_Store for the OS X
for folder in train_known_raw:
images = [os.path.join(folder, f) for f in os.listdir(
folder) if not f.endswith('.DS_Store')]
if not os.path.exists(os.path.join(
destPath, 'test_known_raw', folder.split('/')[-1:][0])):
os.makedirs(os.path.join(destPath, 'test_known_raw',
folder.split('/')[-1:][0]))
# print "Created {}".format(os.path.join(destPath,
# 'test_known_raw', folder.split('/')[-1:][0]))
for i in range(int(0.9 * len(images)), len(images)):
destFile = os.path.join(destPath, 'test_known_raw', folder.split(
'/')[-1:][0], images[i].split('/')[-1:][0])
try:
shutil.move(images[i], destFile)
except:
pass
if args.verbose:
print "Spliting lfw dataset took {} seconds.".format(time.time() - start)
start = time.time()
# Copy unknown test dataset
for i in range(int(args.rangeOfPeople.split(':')
[1]), len(folderName_sorted)):
src = os.path.join(lfwPath, folderName_sorted[i])
try:
destFolder = os.path.join(
destPath, 'test_unknown_raw', folderName_sorted[i])
shutil.copytree(src, destFolder)
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
shutil.copy(src, destFolder)
else:
if args.verbose:
print('Directory not copied. Error: %s' % e)
if args.verbose:
print "Copying test dataset from lfw took {} seconds.".format(time.time() - start)
start = time.time()
class Args():
"""
This class is created to pass arguments to ./util/align-dlib.py
"""
def __init__(self, inputDir, outputDir, verbose):
self.inputDir = inputDir
self.dlibFacePredictor = os.path.join(
dlibModelDir, "shape_predictor_68_face_landmarks.dat")
self.mode = 'align'
self.landmarks = 'outerEyesAndNose'
self.size = 96
self.outputDir = outputDir
self.skipMulti = True
self.verbose = verbose
self.fallbackLfw = False
argsForAlign = Args(
os.path.join(
destPath,
'train_known_raw'),
os.path.join(
destPath,
'train_known_aligned'),
args.verbose)
jobs = []
for i in range(8):
p = multiprocessing.Process(
target=align_dlib.alignMain, args=(
argsForAlign,))
jobs.append(p)
p.start()
for p in jobs:
p.join()
if args.verbose:
print "Aligning the raw train data took {} seconds.".format(time.time() - start)
start = time.time()
os.system(
'./batch-represent/main.lua -outDir ' +
os.path.join(
destPath,
'train_known_features') +
' -data ' +
os.path.join(
destPath,
'train_known_aligned'))
if args.verbose:
print "Extracting features from aligned train data took {} seconds.".format(time.time() - start)
start = time.time()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dlibFacePredictor',
type=str,
help="Path to dlib's face predictor.",
default=os.path.join(
dlibModelDir,
"shape_predictor_68_face_landmarks.dat"))
parser.add_argument(
'--networkModel',
type=str,
help="Path to Torch network model.",
default=os.path.join(
openfaceModelDir,
'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--verbose', action='store_true')
subparsers = parser.add_subparsers(dest='mode', help="Mode")
trainParser = subparsers.add_parser('train',
help="Train a new classifier.")
trainParser.add_argument('--ldaDim', type=int, default=-1)
trainParser.add_argument(
'--classifier',
type=str,
choices=[
'LinearSvm',
'GMM',
'RadialSvm',
'DecisionTree'],
help='The type of classifier to use.',
default='LinearSvm')
trainParser.add_argument(
'workDir',
type=str,
help="The input work directory containing 'reps.csv' and 'labels.csv'. Obtained from aligning a directory with 'align-dlib' and getting the representations with 'batch-represent'.")
inferParser = subparsers.add_parser(
'infer', help='Predict who an image contains from a trained classifier.')
inferParser.add_argument(
'classifierModel',
type=str,
help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')
inferParser.add_argument('imgs', type=str, nargs='+',
help="Input image.")
inferFromTestParser = subparsers.add_parser(
'inferFromTest',
help='Predict who an image contains from a trained classifier.')
# inferFromTestParser.add_argument('--classifierModel', type=str,
# help='The Python pickle representing the classifier. This is NOT the
# Torch network model, which can be set with --networkModel.')
inferFromTestParser.add_argument(
'featureFolder',
type=str,
nargs='+',
help="Input the fratures folder which has the classifiers.")
inferFromTestParser.add_argument(
'testFolder',
type=str,
nargs='+',
help="Input the test folder. It can be either known test dataset or unknown test dataset.")
inferFromTestParser.add_argument(
'--threshold',
type=float,
nargs='+',
help="Threshold of the confidence to classify a prediction as unknown person. <threshold will be predicted as unknown person.",
default=0.0)
inferFromTestParser.add_argument(
'--unknown',
action='store_true',
help="Use this flag if you are testing on unknown dataset. Make sure you set thresold value")
preprocessParser = subparsers.add_parser(
'preprocess',
help='Before benchmarking, preprocess divides the dataset into train and test pairs. It also aligns the train dataset and extracts features from it.')
preprocessParser.add_argument('--lfwDir', type=str,
help="Enter the lfw face directory")
preprocessParser.add_argument(
'--rangeOfPeople',
type=str,
help="Range of the people you would like to take as known person group. Not that the input is a list starts with 0 and the people are sorted in decending order of number of images. Eg: 0:10 ")
preprocessParser.add_argument(
'--featuresDir',
type=str,
help="Enter the directory location where the aligned images, features, and classifer model will be saved.")
args = parser.parse_args()
if args.verbose:
print("Argument parsing and import libraries took {} seconds.".format(
time.time() - start))
start = time.time()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,
cuda=args.cuda)
if args.verbose:
print("Loading the dlib and OpenFace models took {} seconds.".format(
time.time() - start))
start = time.time()
if args.mode == 'train':
train(args)
elif args.mode == 'infer':
# infer(args)
raise Exception("Use ./demo/classifier.py")
elif args.mode == 'inferFromTest':
inferFromTest(args)
elif args.mode == 'preprocess':
preprocess(args)
| apache-2.0 |
maheshakya/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 3 | 18096 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# Deprecation of Ward class
assert_warns(DeprecationWarning, Ward).fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering with a callable affinity
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
"""
Check that we obtain the correct solution for structured linkage trees.
"""
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
"""
Check that we obtain the correct solution for unstructured linkage trees.
"""
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With a specified number of clusters, just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
"""
Check that the height of the results of linkage tree is sorted.
"""
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
"""
Check that we obtain the correct number of clusters with
agglomerative clustering.
"""
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=mkdtemp(),
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
def test_ward_agglomeration():
"""
Check that we obtain the correct solution in a simplistic case
"""
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
assert_warns(DeprecationWarning, WardAgglomeration)
with ignore_warnings():
ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
ward.fit(X)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_array_equal(agglo.labels_, ward.labels_)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
"""Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
"""
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
"""
Check that connectivity in the ward tree is propagated correctly during
merging.
"""
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144),
])
connectivity = kneighbors_graph(X, 10)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
"""
Check that children are ordered in the same way for both structured and
unstructured versions of ward_tree.
"""
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
"""Test return_distance option on linkage and ward trees"""
# test that return_distance when set true, gives same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
"""
Check non-regression of a bug when a connectivity that does not support
item assignment is provided with more than one connected component.
"""
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
"""Test that the full tree is computed if n_clusters is small"""
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5)
# When n_clusters is less, the full tree should be built
# that is the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large, greater than max of 100 and 0.02 * n_samples.
# we should stop when there are n_clusters.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
mlyundin/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
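# Editor's sketch, not part of the original example: a quick check of the
# tokenizer above. Only tokens of two or more word characters are kept, and
# only tokens whose first character is a digit or underscore are collapsed to
# the '#NUMBER' placeholder, so 'A7' survives while '66' does not.
assert number_aware_tokenizer("Order 66 units of part A7") == ['Order', '#NUMBER', 'units', 'of', 'part', 'A7']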
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
abhisg/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
brockk/clintrials | clintrials/stats.py | 1 | 6647 | __author__ = 'Kristian Brock'
__contact__ = '[email protected]'
""" Classes and methods to perform general useful statistical routines. """
from collections import OrderedDict
import logging
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde, chi2, norm
from scipy.optimize import fsolve
def bootstrap(x):
""" Bootstrap sample a list.
:param x: sample observations
:type x: list
:return: bootstrap sample
:rtype: numpy.array
"""
return np.random.choice(x, size=len(x), replace=1)
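# Editor's note, not part of the original module: each call draws len(x)
# observations with replacement, so repeating the call builds a bootstrap
# distribution of any statistic, e.g.
#   boot_means = [bootstrap([2.1, 3.4, 1.8, 2.9]).mean() for _ in range(1000)]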
def density(x, n_points=100, covariance_factor=0.25):
""" Calculate and plot approximate densoty function from a sample.
:param x: sample observations
:type x: list
:param n_points: number of points in density function to estimate
:type n_points: int
:param covariance_factor: covariance factor in scipy routine, see scipy.stats.gaussian_kde
:type covariance_factor: float
:return: None (yet)
:rtype: None
"""
d = gaussian_kde(x)
xs = np.linspace(min(x), max(x), n_points)
d.covariance_factor = lambda : covariance_factor
d._compute_covariance()
plt.plot(xs, d(xs))
plt.show()
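# Editor's usage sketch, not part of the original module: plot a smoothed
# density estimate of 200 standard-normal draws (the call shows the figure):
#   density(list(np.random.randn(200)), n_points=200, covariance_factor=0.3)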
def beta_like_normal(mu, sigma):
""" If X ~ N(mu, sigma^2), get alpha and beta s.t. Y ~ Beta(alpha, beta) has:
E[X] = E[Y] & Var[X] = Var[Y]
This is useful for quickly estimating the effective sample size of a normal prior,
using the principle that the effective sample size of Beta(a, b) is a+b.
:param mu: Mean of a normal r.v.
:type mu: float
:param sigma: Standard deviation of a normal r.v.
:type sigma: float
:return: (alpha, beta) pair of floats
:rtype: tuple
"""
alpha = (mu/sigma)**2 * (1-mu) - mu
beta = ((1-mu)/mu) * alpha
return alpha, beta
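# Editor's worked example, not part of the original module: for a normal prior
# with mu = 0.2 and sigma = 0.1,
#   alpha = (0.2 / 0.1)**2 * (1 - 0.2) - 0.2 = 3.0
#   beta  = ((1 - 0.2) / 0.2) * 3.0          = 12.0
# and Beta(3, 12) has mean 0.2 and variance 0.01 = 0.1**2, so the implied
# effective sample size of the prior is alpha + beta = 15.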
def or_test(a, b, c, d, ci_alpha=0.05):
""" Calculate odds ratio and asymptotic confidence interval for events with counts a, b, c, and d.
:param a: Number of observations with positive exposure (e.g. treated) and positive outcome (e.g cured)
:type a: int
:param b: Number of observations with positive exposure (e.g. treated) and negative outcome (e.g not cured)
:type b: int
:param c: Number of observations with negative exposure (e.g. not treated) and positive outcome (e.g cured)
:type c: int
:param d: Number of observations with negative exposure (e.g. not treated) and negative outcome (e.g not cured)
:type d: int
:param ci_alpha: significance level for the asymptotic confidence interval of the odds ratio
:type ci_alpha: float
:return: A dict object with all available statistics
:rtype: collections.OrderedDict
"""
abcd = [a, b, c, d]
to_return = OrderedDict()
to_return['ABCD'] = abcd
if np.any(np.array(abcd) < 0):
logging.error('Negative event count. Garbage!')
elif np.any(np.array(abcd) == 0):
logging.info('At least one event count was zero. Added one to all counts.')
abcd = np.array(abcd) + 1
a,b,c,d = abcd
odds_ratio = 1. * (a * d) / (c * b)
log_or_se = np.sqrt(sum(1. / np.array(abcd)))
ci_scalars = norm.ppf([ci_alpha/2, 1-ci_alpha/2])
or_ci = np.exp(np.log(odds_ratio) + ci_scalars * log_or_se)
to_return['OR'] = odds_ratio
to_return['Log(OR) SE'] = log_or_se
to_return['OR CI'] = list(or_ci)
to_return['Alpha'] = ci_alpha
return to_return
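# Illustrative sketch, not part of the original module: a hypothetical 2x2
# table with a=12 treated/cured, b=8 treated/not cured, c=5 untreated/cured,
# d=15 untreated/not cured, giving OR = (12*15)/(5*8) = 4.5.
def _example_or_test():
    result = or_test(12, 8, 5, 15, ci_alpha=0.05)
    return result['OR'], result['OR CI']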
def chi_squ_test(x, y, x_positive_value=None, y_positive_value=None, ci_alpha=0.05):
""" Run a chi-squared test for association between x and y.
:param x:
:type x: list
:param y:
:type y: list
:param x_positive_value: item in x corresponding to positive event, 1 by default
:type x_positive_value: object
:param y_positive_value: item in y corresponding to positive event, 1 by default
:type y_positive_value: object
    :param ci_alpha: significance for asymptotic confidence interval of odds-ratio
:type ci_alpha: float
:return: A dict object with all available statistics
:rtype: collections.OrderedDict
"""
sum_oe = 0.0
x_set = set(x)
y_set = set(y)
for x_case in x_set:
x_matches = [z == x_case for z in x]
for y_case in y_set:
y_matches = [z == y_case for z in y]
obs = sum(np.array(x_matches) & np.array(y_matches))
exp = 1. * sum(x_matches) * sum(y_matches) / len(x)
oe = (obs - exp)**2 / exp
sum_oe += oe
num_df = (len(x_set)-1) * (len(y_set)-1)
p = 1-chi2.cdf(sum_oe, num_df)
to_return = OrderedDict([('TestStatistic', sum_oe), ('p', p), ('Df', num_df)])
if len(x_set) == 2 and len(y_set)==2:
x = np.array(x)
y = np.array(y)
if not x_positive_value:
x_positive_value=1
if not y_positive_value:
y_positive_value=1
x_pos_val, y_pos_val = x_positive_value, y_positive_value
a, b, c, d = (sum((x == x_pos_val) & (y == y_pos_val)), sum((x == x_pos_val) & (y != y_pos_val)),
sum((x != x_pos_val) & (y == y_pos_val)), sum((x != x_pos_val) & (y != y_pos_val)))
to_return['Odds'] = or_test(a, b, c, d, ci_alpha=ci_alpha)
else:
# There's no reason why the OR logic could not be calculated for each combination pair
# in x and y, but it's more work so leave it for now.
pass
return to_return
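# Illustrative sketch, not part of the original module: hypothetical binary
# exposure/outcome lists; the 'Odds' entry is present because both variables
# are binary here, so the 2x2 odds-ratio branch is taken.
def _example_chi_squ_test():
    treated = [1, 1, 1, 0, 0, 1, 0, 1, 1, 0]
    cured = [1, 0, 1, 0, 1, 1, 0, 1, 0, 0]
    result = chi_squ_test(treated, cured, x_positive_value=1, y_positive_value=1)
    return result['p'], result['Odds']['OR']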
class ProbabilityDensitySample:
def __init__(self, samp, func):
self._samp = samp
self._probs = func(samp)
self._scale = self._probs.mean()
def expectation(self, vector):
return np.mean(vector * self._probs / self._scale)
def variance(self, vector):
exp = self.expectation(vector)
exp2 = self.expectation(vector**2)
return exp2 - exp**2
def cdf(self, i, y):
""" Get the cumulative density of the parameter in position i that is less than y. """
return self.expectation(self._samp[:,i]<y)
def quantile(self, i, p, start_value=0.1):
""" Get the value of the parameter at position i for which p of the probability mass is in the left-tail. """
return fsolve(lambda z: self.cdf(i, z) - p, start_value)[0]
def cdf_vector(self, vector, y):
""" Get the cumulative density of sample vector that is less than y. """
return self.expectation(vector < y)
def quantile_vector(self, vector, p, start_value=0.1):
""" Get the value of a vector for which p of the probability mass is in the left-tail. """
return fsolve(lambda z: self.cdf_vector(vector, z) - p, start_value)[0] | gpl-3.0 |
IndraVikas/scikit-learn | sklearn/linear_model/least_angle.py | 57 | 49338 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
                # swap only works in place if the matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater
            # than the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
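# Illustrative sketch, not part of the original module: a minimal hypothetical
# call of lars_path on simulated data with a sparse ground truth, following the
# signature documented above. Shapes and noise level are arbitrary.
def _example_lars_path():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 10)
    w = np.zeros(10)
    w[:3] = [2.0, -1.5, 1.0]
    y = np.dot(X, w) + 0.01 * rng.randn(50)
    # coefs has one column per knot of the piecewise-linear Lasso path
    alphas, active, coefs = lars_path(X, y, method='lasso')
    return alphas, active, coefs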
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter: integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True):
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
alexsavio/scikit-learn | sklearn/linear_model/sag.py | 14 | 11269 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is
    the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter: int, optional
The max number of passes over the training data if the stopping
criteria is not reached. Defaults to 1000.
tol: double, optional
The stopping criteria for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose: integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem: dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/document
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
| bsd-3-clause |
potash/scikit-learn | examples/decomposition/plot_image_denoising.py | 70 | 6249 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first using online :ref:`DictionaryLearning` and then
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is, in addition, closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
try:
from scipy import misc
face = misc.face(gray=True)
except AttributeError:
# Old versions of scipy have face in the top level package
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255
# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
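# --- Illustrative addition, not part of the original scikit-learn example ---
# A minimal sketch comparing the reconstructions numerically, reusing the
# difference norm that show_with_diff displays; assumes `reconstructions`,
# `face` and `np` are defined as above.
for method_title, reconstruction in reconstructions.items():
    error = np.sqrt(np.sum((reconstruction - face) ** 2))
    print('%s: difference norm %.2f' % (method_title.replace('\n', ' '), error))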
| bsd-3-clause |
cmunk/protwis | build/management/commands/build_drugs.py | 3 | 3830 | from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from common.models import WebResource, WebLink
from protein.models import Protein
from drugs.models import Drugs
from optparse import make_option
import logging
import csv
import os
import pandas as pd
class Command(BaseCommand):
help = 'Build Drug Data'
def add_arguments(self, parser):
parser.add_argument('--filename', action='append', dest='filename',
help='Filename to import. Can be used multiple times')
logger = logging.getLogger(__name__)
# source file directory
drugdata_data_dir = os.sep.join([settings.DATA_DIR, 'drug_data'])
def handle(self, *args, **options):
if options['filename']:
filenames = options['filename']
else:
filenames = False
try:
self.purge_drugs()
self.create_drug_data(filenames)
except Exception as msg:
print(msg)
self.logger.error(msg)
def purge_drugs(self):
try:
Drugs.objects.all().delete()
except Drugs.DoesNotExist:
self.logger.warning('Drugs model not found: nothing to delete.')
def create_drug_data(self, filenames=False):
self.logger.info('CREATING DRUGDATA')
# read source files
if not filenames:
filenames = [fn for fn in os.listdir(self.drugdata_data_dir) if fn.endswith('drug_data.csv')]
for filename in filenames:
filepath = os.sep.join([self.drugdata_data_dir, filename])
data = pd.read_csv(filepath, low_memory=False, encoding = "ISO-8859-1")
for index, row in enumerate(data.iterrows()):
drugname = data[index:index+1]['Drug Name'].values[0]
trialname = data[index:index+1]['Trial name'].values[0]
drugalias_raw = data[index:index+1]['DrugAliases'].values[0]
drugalias = ['' if str(drugalias_raw) == 'nan' else ', '+str(drugalias_raw)]
# trialadd = ['' if str(trialname) == drugname else ' ('+str(trialname)+')']
drugname = drugname + drugalias[0]
entry_name = data[index:index+1]['EntryName'].values[0]
phase = data[index:index+1]['Phase'].values[0]
PhaseDate = data[index:index+1]['PhaseDate'].values[0]
ClinicalStatus = data[index:index+1]['ClinicalStatus'].values[0]
moa = data[index:index+1]['ModeOfAction'].values[0]
targetlevel = data[index:index+1]['TargetCategory'].values[0]
drugtype = data[index:index+1]['Drug Class'].values[0]
indication = data[index:index+1]['Indication(s)'].values[0]
novelty = data[index:index+1]['Target_novelty'].values[0]
approval = data[index:index+1]['Approval'].values[0]
status = data[index:index+1]['Status'].values[0]
references = data[index:index+1]['PMID'].values[0]
# fetch protein
try:
p = Protein.objects.get(entry_name=entry_name)
except Protein.DoesNotExist:
self.logger.error('Protein not found for entry_name {}'.format(entry_name))
continue
drug, created = Drugs.objects.get_or_create(name=drugname, synonym=', '.join(drugalias), drugtype=drugtype, indication=indication, novelty=novelty, approval=approval, phase=phase, phasedate=PhaseDate, clinicalstatus=ClinicalStatus, moa=moa, status=status, targetlevel=targetlevel,references=references)
drug.target.add(p)
drug.save()
# target_list = drug.target.all()
self.logger.info('COMPLETED CREATING DRUGDATA')
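# Illustrative usage note (addition, not in the original file): as a Django
# management command this module is invoked through manage.py, e.g.
#   python manage.py build_drugs --filename my_drug_data.csv
# --filename uses action='append', so it may be repeated; when omitted, every
# file ending in 'drug_data.csv' under the drug_data directory is imported.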
| apache-2.0 |
jjx02230808/project0223 | sklearn/decomposition/tests/test_dict_learning.py | 67 | 9084 | import numpy as np
from sklearn.utils import check_array
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_input():
n_components = 100
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
Xf = check_array(X, order='F')
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
a = sparse_encode(X, V, algorithm=algo)
b = sparse_encode(Xf, V, algorithm=algo)
assert_array_almost_equal(a, b)
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
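# Illustrative sketch (addition, not an original scikit-learn test): the online
# API exercised above can also be called directly; assuming X as defined at the
# top of this module,
#   code, dictionary = dict_learning_online(X, n_components=8, alpha=1,
#                                           random_state=0)
#   X_approx = np.dot(code, dictionary)   # same shape as X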
| bsd-3-clause |
jameshuang/mapnoise | setup.py | 1 | 1529 | from distutils.core import setup
import py2exe
import FileDialog
from distutils.filelist import findall
import os
import glob
import matplotlib
def find_data_files(source,target,patterns):
"""Locates the specified data-files and returns the matches
in a data_files compatible format.
source is the root of the source data tree.
Use '' or '.' for current directory.
target is the root of the target data tree.
Use '' or '.' for the distribution directory.
patterns is a sequence of glob-patterns for the
files you want to copy.
"""
if glob.has_magic(source) or glob.has_magic(target):
raise ValueError("Magic not allowed in src, target")
ret = {}
for pattern in patterns:
pattern = os.path.join(source,pattern)
for filename in glob.glob(pattern):
if os.path.isfile(filename):
targetpath = os.path.join(target,os.path.relpath(filename,source))
path = os.path.dirname(targetpath)
ret.setdefault(path,[]).append(filename)
return sorted(ret.items())
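# Illustrative note (addition): a call such as
#   find_data_files('images', 'images', ['*.png'])
# returns (directory, [files]) pairs like [('images', ['images/svmap.png'])],
# which is the data_files format distutils/py2exe expect (compare the
# commented-out my_data_files example below).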
my_data_files = find_data_files('','',[
'images/*',
'sample-data/*/*',
])
#my_data_files=[('images', ['images\svmap.png'])]
my_data_files += matplotlib.get_py2exe_datafiles()
setup(
console=['mapnoise.py'],
options={
'py2exe': {
'packages' : ['matplotlib', 'pytz'],
}
},
data_files=my_data_files
)
| mit |
fabianvaccaro/pygums | pythonLibs/pymorph-0.96/pymorph/compat.py | 3 | 20493 | import warnings
warnings.warn('pymorph.compat should be replaced with pymorph', DeprecationWarning)
from notimplemented import *
from mmorph import *
from text import *
# old abbreviations:
clohole=close_holes
ero=erode
cero=cerode
dil=dilate
cdil=cdilate
sedil=sedilate
add4dil=add4dilate
uint8=to_uint8
uint16=to_uint16
int32=to_int32
glblshow=randomcolor
randomcolour=randomcolor
# mmnames:
def _not_implemented(msg):
def f(*args, **kwargs):
raise NotImplementedError, msg
f.__doc__ = '''\
This function is not implemented anymore.
%s''' % msg
return f
mmadd4dil=add4dil
mmaddm=addm
mmareaclose=areaclose
mmareaopen=areaopen
mmasf=asf
mmasfrec=asfrec
mmbinary=binary
mmblob=blob
mmbshow=bshow
mmcbisector=cbisector
mmcdil=cdil
mmcenter=center
mmcero=cero
mmclohole=clohole
mmclose=close
mmcloserec=closerec
mmcloserecth=closerecth
mmcloseth=closeth
mmconcat=concat
mmcthick=cthick
mmcthin=cthin
mmcwatershed=cwatershed
mmdatatype=datatype
mmdil=dil
mmdist=dist
mmdrawv=drawv
mmdtshow=_not_implemented('dtshow: use matplotlib')
mmedgeoff=edgeoff
mmero=ero
mmflood=flood
mmframe=frame
mmgdist=gdist
mmgdtshow=_not_implemented('gdtshow: use matplotlib')
mmgradm=gradm
mmgrain=grain
mmgray=gray
mmhistogram=histogram
mmhmax=hmax
mmhmin=hmin
mmhomothick=homothick
mmhomothin=homothin
mmimg2se=img2se
mminfcanon=infcanon
mminfgen=infgen
mminfrec=infrec
mminpos=inpos
mminterot=interot
mmintersec=intersec
mmintershow=intershow
mmisbinary=isbinary
mmisequal=isequal
mmlabel=label
mmlabelflat=labelflat
mmlastero=lastero
mmlblshow=_not_implemented('lblshow: use matplotlib')
mmlimits=limits
mmmat2set=mat2set
mmmaxleveltype=maxleveltype
mmneg=neg
mmopen=open
mmopenrec=openrec
mmopenrecth=openrecth
mmopenth=openth
mmopentransf=opentransf
mmpad4n=pad4n
mmpatspec=patspec
mmreadgray=_not_implemented('readgray: use PIL or readmagick')
mmregmax=regmax
mmregmin=regmin
mmse2hmt=se2hmt
mmse2interval=se2interval
mmsebox=sebox
mmsecross=secross
mmsedil=sedil
mmsedisk=sedisk
mmseline=seline
mmsereflect=sereflect
mmserot=serot
mmseshow=seshow
mmsesum=sesum
mmset2mat=set2mat
mmsetrans=setrans
mmseunion=seunion
mmskelm=skelm
mmskelmrec=skelmrec
mmskiz=skiz
mmsubm=subm
mmsupcanon=supcanon
mmsupgen=supgen
mmsuprec=suprec
mmswatershed=swatershed
mmsymdif=symdiff
mmtext=text
mmthick=thick
mmthin=thin
mmthreshad=threshad
mmtoggle=toggle
mmunion=union
mmvmax=vmax
mmwatershed=watershed
gshow=overlay
gdtshow=isolines
# Functions which were removed:
def mminstall(*args):
pass
def mmversion(*args):
pass
def mmregister(*args):
pass
def mmcmp(f1, oper, f2, oper1=None, f3=None):
"""
- Alternative:
Consider using array operations directly, i.e., instead of
mmcmp(f1, '>', f2)
simply use
f1 > f2
- Purpose
Compare two images pixelwisely.
- Synopsis
y = mmcmp(f1, oper, f2, oper1=None, f3=None)
- Input
f1: Gray-scale (uint8 or uint16) or binary image.
oper: String Default: "". relationship from: '==', '~=',
'<','<=', '>', '>='.
f2: Gray-scale (uint8 or uint16) or binary image.
oper1: String Default: None. relationship from: '==', '~=',
'<','<=', '>', '>='.
f3: Gray-scale (uint8 or uint16) or binary image. Default:
None.
- Output
y: Binary image.
- Description
Apply the relation oper to each pixel of images f1 and f2; the
result is a binary image with the same size. Optionally, it is
possible to make the comparison among three images. It is
possible to use a constant value in place of any image, in this
case the constant is treated as an image of the same size as the
others with all pixels with the value of the constant.
- Examples
#
# example 1
#
print mmcmp(to_uint8([1, 2, 3]),'<', to_uint8(2))
print mmcmp(to_uint8([1, 2, 3]),'<', to_uint8([0, 2, 4]))
print mmcmp(to_uint8([1, 2, 3]),'==', to_uint8([1, 1, 3]))
#
# example 2
#
f=readgray('keyb.tif')
fbin=mmcmp(to_uint8(10), '<', f, '<', to_uint8(50))
show(f)
show(fbin)
"""
if oper == '==': y = (f1==f2)
elif oper == '~=': y = (f1!=f2)
elif oper == '<=': y = (f1<=f2)
elif oper == '>=': y = (f1>=f2)
elif oper == '>': y = (f1> f2)
elif oper == '<': y = (f1< f2)
else:
assert 0, 'oper must be one of: ==, ~=, >, >=, <, <=, it was:'+oper
if oper1 != None:
if oper1 == '==': y = intersec(y, f2==f3)
elif oper1 == '~=': y = intersec(y, f2!=f3)
elif oper1 == '<=': y = intersec(y, f2<=f3)
elif oper1 == '>=': y = intersec(y, f2>=f3)
elif oper1 == '>': y = intersec(y, f2> f3)
elif oper1 == '<': y = intersec(y, f2< f3)
else:
assert 0, 'oper1 must be one of: ==, ~=, >, >=, <, <=, it was:'+oper1
y = binary(y)
return y
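# Illustrative sketch (addition): the array-operation alternative mentioned in
# the mmcmp docstring, assuming f1, f2 and f3 are numpy arrays of equal shape:
#   y = binary(f1 < f2)                  # same result as mmcmp(f1, '<', f2)
#   y = binary((f1 < f2) & (f2 < f3))    # same as mmcmp(f1, '<', f2, '<', f3)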
def mmvdome(f, v=1, Bc=None):
"""
- Purpose
Obsolete, use vmax.
- Synopsis
y = mmvdome(f, v=1, Bc=None)
- Input
f: Gray-scale (uint8 or uint16) image.
v: Default: 1. Volume parameter.
Bc: Structuring Element Default: None (3x3 elementary cross).
Structuring element (connectivity).
- Output
y: Gray-scale (uint8 or uint16) or binary image.
- Description
The correct name for this operator mmvdome is vmax.
"""
if Bc is None: Bc = secross()
y = hmax(f,v,Bc);
return y
def mmis(f1, oper, f2=None, oper1=None, f3=None):
"""
- Alternative
Consider using array operations or isbinary()
- Purpose
Verify if a relationship among images is true or false.
- Synopsis
y = mmis(f1, oper, f2=None, oper1=None, f3=None)
- Input
f1: Gray-scale (uint8 or uint16) or binary image.
oper: String relationship from: '==', '~=', '<','<=', '>',
'>=', 'binary', 'gray'.
f2: Gray-scale (uint8 or uint16) or binary image. Default:
None.
oper1: String Default: None. relationship from: '==', '~=',
'<','<=', '>', '>='.
f3: Gray-scale (uint8 or uint16) or binary image. Default:
None.
- Output
y: Bool value: 0 or 1
- Description
Verify if the property or relationship between images is true or
false. The result is true if the relationship is true for all
the pixels in the image, and false otherwise. (Obs: This
function replaces is equal, is lesseq, is binary ).
- Examples
#
fbin=binary([0, 1])
f1=to_uint8([1, 2, 3])
f2=to_uint8([2, 2, 3])
f3=to_uint8([2, 3, 4])
mmis(fbin,'binary')
mmis(f1,'gray')
mmis(f1,'==',f2)
mmis(f1,'<',f3)
mmis(f1,'<=',f2)
mmis(f1,'<=',f2,'<=',f3)
"""
from string import upper
if f2 == None:
oper=upper(oper);
if oper == 'BINARY': return isbinary(f1)
elif oper == 'GRAY' : return not isbinary(f1)
else:
assert 0,'oper should be BINARY or GRAY, was'+oper
elif oper == '==': y = isequal(f1, f2)
elif oper == '~=': y = not isequal(f1,f2)
elif oper == '<=': y = mmislesseq(f1,f2)
elif oper == '>=': y = mmislesseq(f2,f1)
elif oper == '>': y = isequal(neg(threshad(f2,f1)),binary(1))
elif oper == '<': y = isequal(neg(threshad(f1,f2)),binary(1))
else:
assert 0,'oper must be one of: ==, ~=, >, >=, <, <=, it was:'+oper
if oper1 != None:
if oper1 == '==': y = y and isequal(f2,f3)
elif oper1 == '~=': y = y and (not isequal(f2,f3))
elif oper1 == '<=': y = y and mmislesseq(f2,f3)
elif oper1 == '>=': y = y and mmislesseq(f3,f2)
elif oper1 == '>': y = y and isequal(neg(threshad(f3,f2)),binary(1))
elif oper1 == '<': y = y and isequal(neg(threshad(f2,f3)),binary(1))
else:
assert 0,'oper1 must be one of: ==, ~=, >, >=, <, <=, it was:'+oper1
return y
def mmislesseq(f1, f2, MSG=None):
"""
- Alternative
Consider using f1 <= f2
- Purpose
Verify if one image is less or equal another (is beneath)
- Synopsis
bool = mmislesseq(f1, f2)
- Input
f1: Gray-scale (uint8 or uint16) or binary image.
f2: Gray-scale (uint8 or uint16) or binary image.
- Output
bool: Boolean
- Description
mmislesseq compares the images f1 and f2 and returns true (1),
if f1(x) <= f2(x), for every pixel x, and false (0) otherwise.
- Examples
#
f1 = to_uint8([0, 1, 2, 3])
f2 = to_uint8([9, 5, 3, 3])
print mmislesseq(f1,f2)
print mmislesseq(f2,f1)
print mmislesseq(f1,f1)
"""
from numpy import ravel
bool = min(ravel(f1<=f2))
return bool
def mmstats(f, measurement):
"""
- Purpose
Find global image statistics.
- Synopsis
y = mmstats(f, measurement)
- Input
f:
measurement: String Default: "". Choose the measure to compute:
'max', 'min', 'median', 'mean', 'sum', 'std',
'std1'.
- Output
y:
- Description
Compute global image statistics: 'max' - maximum gray-scale
value in image; 'min' - minimum gray-scale value in image; 'sum'
- sum of all pixel values; 'median' - median value of all pixels
in image; 'mean' - mean value of all pixels in image; 'std' -
standard deviation of all pixels (normalized by N-1); 'std1' -
idem, normalized by N.
"""
from string import upper
from numpy import ravel
from numpy.oldnumeric.mlab import mean, median, std
measurement = upper(measurement)
if measurement == 'MAX': return f.max()
elif measurement == 'MIN': return f.min()
elif measurement == 'MEAN': return f.mean()
elif measurement == 'SUM': return f.sum()
elif measurement == 'MEDIAN': return median(ravel(f))  # ndarrays have no .median() method
elif measurement == 'STD': return f.std()
else:
assert 0,'pymorph.compat.mmstats: Not a valid measurement'
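# Illustrative sketch (addition): for a numpy array f these calls reduce to the
# corresponding array reductions, e.g.
#   mmstats(f, 'mean')    # == f.mean()
#   mmstats(f, 'median')  # == median(ravel(f))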
def mmsurf(f,options = None):
return f
_figs = [None]
def plot(plotitems=[], options=[], outfig=-1, filename=None):
"""
- Purpose
Plot a function.
- Synopsis
fig = plot(plotitems=[], options=[], outfig=-1, filename=None)
- Input
plotitems: Default: []. List of plotitems.
options: Default: []. List of options.
outfig: Default: -1. Integer. Figure number. 0 creates a new
figure.
filename: Default: None. String. Name of the PNG output file.
- Output
fig: Figure number.
- Examples
#
import numpy
#
x = numpy.arange(0, 2*numpy.pi, 0.1)
plot([[x]])
y1 = numpy.sin(x)
y2 = numpy.cos(x)
opts = [['title', 'Example Plot'],\
['grid'],\
['style', 'linespoints'],\
['xlabel', '"X values"'],\
['ylabel', '"Y Values"']]
y1_plt = [x, y1, None, 'sin(X)']
y2_plt = [x, y2, 'lines', 'cos(X)']
#
# plotting two graphs using one step
fig1 = plot([y1_plt, y2_plt], opts, 0)
#
# plotting the same graphs using two steps
fig2 = plot([y1_plt], opts, 0)
fig2 = plot([y2_plt], opts, fig2)
#
# first function has been lost, lets recover it
opts.append(['replot'])
fig2 = plot([y1_plt], opts, fig2)
"""
import Gnuplot
import numpy
newfig = 0
if (plotitems == 'reset'):
_figs[0] = None
_figs[1:] = []
return 0
if len(plotitems) == 0:
# no plotitems specified: replot current figure
if _figs[0]:
outfig = _figs[0]
g = _figs[outfig]
g.replot()
return outfig
else:
#assert 0, "plot error: There is no current figure\n"
print "plot error: There is no current figure\n"
return 0
# figure to be plotted
if ((outfig < 0) and _figs[0]):
# current figure
outfig = _figs[0]
elif ( (outfig == 0) or ( (outfig == -1) and not _figs[0] ) ):
# new figure
newfig = 1
outfig = len(_figs)
elif outfig >= len(_figs):
#assert 0, 'plot error: Figure ' + str(outfig) + 'does not exist\n'
print 'plot error: Figure ' + str(outfig) + 'does not exist\n'
return 0
#current figure
_figs[0] = outfig
# Gnuplot pointer
if newfig:
if len(_figs) > 20:
print '''plot error: could not create figure. Too many PlotItems in memory (20). Use
plot('reset') to clear table'''
return 0
g = Gnuplot.Gnuplot()
_figs.append(g)
else:
g = _figs[outfig]
# options
try:
options.remove(['replot'])
except:
g.reset()
try:
#default style
g('set data style lines')
for option in options:
if option[0] == 'grid':
g('set grid')
elif option[0] == 'title':
g('set title "' + option[1] + '"')
elif option[0] == 'xlabel':
g('set xlabel ' + option[1])
elif option[0] == 'ylabel':
g('set ylabel ' + option[1])
elif option[0] == 'style':
g('set data style ' + option[1])
else:
print "plot warning: Unknown option: " + option[0]
except:
print "plot warning: Bad usage in options! Using default values. Please, use help.\n"
# Plot items: item[0]=x, item[1]=y, item[2]=style
for item in plotitems:
try:
title = None
style = None
x = numpy.ravel(item[0])
if len(item) > 1:
# y axis specified
y = numpy.ravel(item[1])
if len(item) > 2:
# style specified
style = item[2]
if len(item) > 3:
title = item[3]
else:
# no y axis specified
y = x
x = numpy.arange(len(y))
g.replot(Gnuplot.Data(x, y, title=title, with_=style))
except:
g.reset()
if newfig:
_figs.pop()
#assert 0, "plot error: Bad usage in plotitems! Impossible to plot graph. Please, use help.\n"
print "plot error: Bad usage in plotitems! Impossible to plot graph. Please, use help.\n"
return 0
# PNG file
if filename:
g.hardcopy(filename, terminal='png', color=1)
fig = outfig
return fig
mmplot=plot
def mmwatershed(f,Bc=None,linereg='LINES'):
return watershed(f,Bc,(linereg == 'LINES'))
def mmcwatershed(f,Bc=None,linereg='LINES'):
return cwatershed(f,Bc,(linereg == 'LINES'))
def mmskiz(f,Bc=None,LINEREG='LINES',METRIC=None):
return skiz(f,Bc,(LINEREG=='LINES'),METRIC)
def mmdist(f,Bc=None,METRIC=None):
return dist(f,Bc,metric=METRIC)
def mmendpoints(OPTION='LOOP'):
return endpoints(option=OPTION)
def mmgshow(X, X1=None, X2=None, X3=None, X4=None, X5=None, X6=None):
"""
- Purpose
Apply binary overlays as color layers on a binary or gray-scale
image
- Synopsis
Y = gshow(X, X1=None, X2=None, X3=None, X4=None, X5=None,
X6=None)
- Input
X: Gray-scale (uint8 or uint16) or binary image.
X1: Binary image. Default: None. Red overlay.
X2: Binary image. Default: None. Green overlay.
X3: Binary image. Default: None. Blue overlay.
X4: Binary image. Default: None. Magenta overlay.
X5: Binary image. Default: None. Yellow overlay.
X6: Binary image. Default: None. Cyan overlay.
- Output
Y: Gray-scale (uint8 or uint16) or binary image.
"""
if isbinary(X): X = gray(X,'uint8')
r = X
g = X
b = X
if X1 is not None: # red 1 0 0
assert isbinary(X1),'X1 must be binary overlay'
x1 = gray(X1,'uint8')
r = union(r,x1)
g = intersec(g,neg(x1))
b = intersec(b,neg(x1))
if X2 is not None: # green 0 1 0
assert isbinary(X2),'X2 must be binary overlay'
x2 = gray(X2,'uint8')
r = intersec(r,neg(x2))
g = union(g,x2)
b = intersec(b,neg(x2))
if X3 is not None: # blue 0 0 1
assert isbinary(X3),'X3 must be binary overlay'
x3 = gray(X3,'uint8')
r = intersec(r,neg(x3))
g = intersec(g,neg(x3))
b = union(b,x3)
if X4 is not None: # magenta 1 0 1
assert isbinary(X4),'X4 must be binary overlay'
x4 = gray(X4,'uint8')
r = union(r,x4)
g = intersec(g,neg(x4))
b = union(b,x4)
if X5 is not None: # yellow 1 1 0
assert isbinary(X5),'X5 must be binary overlay'
x5 = gray(X5,'uint8')
r = union(r,x5)
g = union(g,x5)
b = intersec(b,neg(x5))
if X6 is not None: # cyan 0 1 1
assert isbinary(X6),'X6 must be binary overlay'
x6 = gray(X6,'uint8')
r = intersec(r,neg(x6))
g = union(g,x6)
b = union(b,x6)
return concat('d',r,g,b)
def mmglblshow(X, border=0.0):
"""
- Purpose
Apply a random color table to a gray-scale image.
- Synopsis
Y = glblshow(X, border=0.0)
- Input
X: Gray-scale (uint8 or uint16) image. Labeled image.
border: Boolean Default: 0.0. Labeled image.
- Output
Y: Gray-scale (uint8 or uint16) or binary image.
"""
from numpy import take, resize, shape
from numpy.random import rand
mmin = X.min()
mmax = X.max()
ncolors = mmax - mmin + 1
R = to_int32(rand(ncolors)*255)
G = to_int32(rand(ncolors)*255)
B = to_int32(rand(ncolors)*255)
if mmin == 0:
R[0],G[0],B[0] = 0,0,0
r=resize(take(R, X.ravel() - mmin),X.shape)
g=resize(take(G, X.ravel() - mmin),X.shape)
b=resize(take(B, X.ravel() - mmin),X.shape)
Y=concat('d',r,g,b)
return Y
def readgray(filename):
"""
- Purpose
Read an image from a commercial file format and store it as a
gray-scale image.
- Synopsis
y = readgray(filename)
- Input
filename: String Name of file to read.
- Output
y: Gray-scale (uint8 or uint16) or binary image.
- Description
readgray reads the image in filename and stores it in y, an
uint8 gray-scale image (without colormap). If the input file is
a color RGB image, it is converted to gray-scale using the
equation: y = 0.2989 R + 0.587 G + 0.114 B. This function uses
the PIL module.
- Examples
#
a=readgray('cookies.tif')
show(a)
"""
import pylab
import numpy
y = pylab.imread(filename)
if (len(y.shape) == 3) and (y.shape[0] == 3):
# compare the channels separately; 'and' between whole arrays is ambiguous
if numpy.alltrue(y[0,:,:] == y[1,:,:]) and numpy.alltrue(y[0,:,:] == y[2,:,:]):
y = y[0,:,:]
else:
print 'Warning: converting true-color RGB image to gray'
y = ubyte(0.2989 * y[0,:,:] +
0.5870 * y[1,:,:] +
0.1140 * y[2,:,:])
elif (len(y.shape) == 2):
pass
else:
raise ValueError, 'Error: input is not a 2D image'
return y
def freedom(L=5):
"""
DOES NOT DO ANYTHING
"""
return -1
mmfreedom=freedom
| gpl-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/utils/tests/test_weighted_mode.py | 3 | 1095 | import numpy as np
from nose.tools import assert_true
from sklearn.utils.extmath import weighted_mode
from scipy import stats
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_true(np.all(mode == mode_result))
assert_true(np.all(score.ravel() == w[:, :5].sum(1)))
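# Illustrative sketch (addition, not an original scikit-learn test):
# weighted_mode generalizes scipy.stats.mode by summing per-value weights
# instead of counting occurrences, e.g.
#   weighted_mode([4, 1, 4, 2, 4, 2], [1, 1, 1, 1, 1, 1])      # mode 4, weight 3
#   weighted_mode([4, 1, 4, 2, 4, 2], [1, 3, 0.5, 1.5, 1, 2])  # mode 2, weight 3.5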
if __name__ == '__main__':
import nose
nose.runmodule()
| agpl-3.0 |
devanshdalal/scikit-learn | sklearn/metrics/scorer.py | 33 | 17925 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.model_selection.GridSearchCV` or
:func:`sklearn.model_selection.cross_val_score` as the ``scoring``
parameter, to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, mean_squared_log_error, accuracy_score,
f1_score, roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from .cluster import homogeneity_score
from .cluster import completeness_score
from .cluster import v_measure_score
from .cluster import mutual_info_score
from .cluster import adjusted_mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
# XXX After removing the deprecated scorers (v0.20) remove the
# XXX deprecation_msg property again and remove __call__'s body again
self._deprecation_msg = None
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
if self._deprecation_msg is not None:
warnings.warn(self._deprecation_msg,
category=DeprecationWarning,
stacklevel=2)
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
super(_PredictScorer, self).__call__(estimator, X, y_true,
sample_weight=sample_weight)
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
super(_ProbaScorer, self).__call__(clf, X, y,
sample_weight=sample_weight)
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
super(_ThresholdScorer, self).__call__(clf, X, y,
sample_weight=sample_weight)
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
scorers = [scorer for scorer in SCORERS
if SCORERS[scorer]._deprecation_msg is None]
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(scorers)))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should be an estimator implementing "
"'fit' method, %r was passed" % estimator)
if isinstance(scoring, six.string_types):
return get_scorer(scoring)
elif has_scoring:
# Heuristic to ensure user has not passed a metric
module = getattr(scoring, '__module__', None)
if hasattr(module, 'startswith') and \
module.startswith('sklearn.metrics.') and \
not module.startswith('sklearn.metrics.scorer') and \
not module.startswith('sklearn.metrics.tests.'):
raise ValueError('scoring value %r looks like it is a metric '
'function rather than a scorer. A scorer should '
'require an estimator as its first parameter. '
'Please use `make_scorer` to convert a metric '
'to a scorer.' % scoring)
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
``mean_squared_error``, ``adjusted_rand_index`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.model_selection import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
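# Illustrative sketch (addition): building a scorer from a loss, where the sign
# flip described above makes greater returned values better for model selection:
#   from sklearn.metrics import mean_squared_error
#   neg_mse = make_scorer(mean_squared_error, greater_is_better=False)
#   # neg_mse(estimator, X, y) == -mean_squared_error(y, estimator.predict(X))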
# Standard regression scores
r2_scorer = make_scorer(r2_score)
neg_mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
deprecation_msg = ('Scoring method mean_squared_error was renamed to '
'neg_mean_squared_error in version 0.18 and will '
'be removed in 0.20.')
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_squared_error_scorer._deprecation_msg = deprecation_msg
neg_mean_squared_log_error_scorer = make_scorer(mean_squared_log_error,
greater_is_better=False)
neg_mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
deprecation_msg = ('Scoring method mean_absolute_error was renamed to '
'neg_mean_absolute_error in version 0.18 and will '
'be removed in 0.20.')
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
mean_absolute_error_scorer._deprecation_msg = deprecation_msg
neg_median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
deprecation_msg = ('Scoring method median_absolute_error was renamed to '
'neg_median_absolute_error in version 0.18 and will '
'be removed in 0.20.')
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
median_absolute_error_scorer._deprecation_msg = deprecation_msg
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
neg_log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
deprecation_msg = ('Scoring method log_loss was renamed to '
'neg_log_loss in version 0.18 and will be removed in 0.20.')
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
log_loss_scorer._deprecation_msg = deprecation_msg
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
homogeneity_scorer = make_scorer(homogeneity_score)
completeness_scorer = make_scorer(completeness_score)
v_measure_scorer = make_scorer(v_measure_score)
mutual_info_scorer = make_scorer(mutual_info_score)
adjusted_mutual_info_scorer = make_scorer(adjusted_mutual_info_score)
normalized_mutual_info_scorer = make_scorer(normalized_mutual_info_score)
fowlkes_mallows_scorer = make_scorer(fowlkes_mallows_score)
SCORERS = dict(r2=r2_scorer,
neg_median_absolute_error=neg_median_absolute_error_scorer,
neg_mean_absolute_error=neg_mean_absolute_error_scorer,
neg_mean_squared_error=neg_mean_squared_error_scorer,
neg_mean_squared_log_error=neg_mean_squared_log_error_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
neg_log_loss=neg_log_loss_scorer,
# Cluster metrics that use supervised evaluation
adjusted_rand_score=adjusted_rand_scorer,
homogeneity_score=homogeneity_scorer,
completeness_score=completeness_scorer,
v_measure_score=v_measure_scorer,
mutual_info_score=mutual_info_scorer,
adjusted_mutual_info_score=adjusted_mutual_info_scorer,
normalized_mutual_info_score=normalized_mutual_info_scorer,
fowlkes_mallows_score=fowlkes_mallows_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(metric, pos_label=None,
average=average)
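# Illustrative note (addition): the loop above also registers averaged variants
# such as 'precision_macro', 'recall_micro' and 'f1_weighted', so for example
# get_scorer('f1_macro') resolves to
# make_scorer(f1_score, pos_label=None, average='macro').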
| bsd-3-clause |
arhik/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/contour.py | 69 | 42063 | """
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import division
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib.path as path
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as collections
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
# Import needed for adding manual selection capability to clabel
from matplotlib.blocking_input import BlockingContourLabeler
# We can't use a single line collection for contour because a line
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ContourLabeler:
'''Mixin to provide labelling capability to ContourSet'''
def clabel(self, *args, **kwargs):
"""
call signature::
clabel(cs, **kwargs)
adds labels to line contours in *cs*, where *cs* is a
:class:`~matplotlib.contour.ContourSet` object returned by
contour.
::
clabel(cs, v, **kwargs)
only labels contours listed in *v*.
Optional keyword arguments:
*fontsize*:
See http://matplotlib.sf.net/fonts.html
*colors*:
- if *None*, the color of each label matches the color of
the corresponding contour
- if one string color, e.g. *colors* = 'r' or *colors* =
'red', all labels will be plotted in this color
- if a tuple of matplotlib color args (string, float, rgb, etc),
different labels will be plotted in different colors in the order
specified
*inline*:
controls whether the underlying contour is removed or
not. Default is *True*.
*inline_spacing*:
space in pixels to leave on each side of label when
placing inline. Defaults to 5. This spacing will be
exact for labels at locations where the contour is
straight, less so for labels on curved contours.
*fmt*:
a format string for the label. Default is '%1.3f'
Alternatively, this can be a dictionary matching contour
levels with arbitrary strings to use for each contour level
(i.e., fmt[level]=string)
*manual*:
if *True*, contour labels will be placed manually using
mouse clicks. Click the first button near a contour to
add a label, click the second button (or potentially both
mouse buttons at once) to finish adding labels. The third
button can be used to remove the last label added, but
only if labels are not inline. Alternatively, the keyboard
can be used to select label locations (enter to end label
placement, delete or backspace act like the third mouse button,
and any other key will select a label location).
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
"""
NOTES on how this all works:
clabel basically takes the input arguments and uses them to
add a list of "label specific" attributes to the ContourSet
object. These attributes are all of the form label* and names
should be fairly self explanatory.
Once these attributes are set, clabel passes control to the
labels method (case of automatic label placement) or
BlockingContourLabeler (case of manual label placement).
"""
fontsize = kwargs.get('fontsize', None)
inline = kwargs.get('inline', 1)
inline_spacing = kwargs.get('inline_spacing', 5)
self.labelFmt = kwargs.get('fmt', '%1.3f')
_colors = kwargs.get('colors', None)
# Detect if manual selection is desired and remove from argument list
self.labelManual=kwargs.get('manual',False)
if len(args) == 0:
levels = self.levels
indices = range(len(self.levels))
elif len(args) == 1:
levlabs = list(args[0])
indices, levels = [], []
for i, lev in enumerate(self.levels):
if lev in levlabs:
indices.append(i)
levels.append(lev)
if len(levels) < len(levlabs):
msg = "Specified levels " + str(levlabs)
msg += "\n don't match available levels "
msg += str(self.levels)
raise ValueError(msg)
else:
raise TypeError("Illegal arguments to clabel, see help(clabel)")
self.labelLevelList = levels
self.labelIndiceList = indices
self.labelFontProps = font_manager.FontProperties()
if fontsize == None:
font_size = int(self.labelFontProps.get_size_in_points())
else:
if type(fontsize) not in [int, float, str]:
raise TypeError("Font size must be an integer number.")
# Can't it be floating point, as indicated in line above?
else:
if type(fontsize) == str:
font_size = int(self.labelFontProps.get_size_in_points())
else:
self.labelFontProps.set_size(fontsize)
font_size = fontsize
self.labelFontSizeList = [font_size] * len(levels)
if _colors == None:
self.labelMappable = self
self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
else:
cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
self.labelCValueList = range(len(self.labelLevelList))
self.labelMappable = cm.ScalarMappable(cmap = cmap,
norm = colors.NoNorm())
#self.labelTexts = [] # Initialized in ContourSet.__init__
#self.labelCValues = [] # same
self.labelXYs = []
if self.labelManual:
print 'Select label locations manually using first mouse button.'
print 'End manual selection with second mouse button.'
if not inline:
print 'Remove last label by clicking third mouse button.'
blocking_contour_labeler = BlockingContourLabeler(self)
blocking_contour_labeler(inline,inline_spacing)
else:
self.labels(inline,inline_spacing)
# Hold on to some old attribute names. These are deprecated and will
# be removed in the near future (sometime after 2008-08-01), but keeping
# for now for backwards compatibility
self.cl = self.labelTexts
self.cl_xy = self.labelXYs
self.cl_cvalues = self.labelCValues
self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
return self.labelTextsList
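# Illustrative usage sketch (addition, not part of the matplotlib source):
# clabel is normally reached through the Axes/pyplot wrappers, e.g.
#   cs = ax.contour(X, Y, Z)
#   ax.clabel(cs, inline=1, fontsize=10, fmt='%1.1f')
# which forward to this method with the keyword arguments documented above.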
def print_label(self, linecontour,labelwidth):
"if contours are too short, don't plot a label"
lcsize = len(linecontour)
if lcsize > 10 * labelwidth:
return 1
xmax = np.amax(linecontour[:,0])
xmin = np.amin(linecontour[:,0])
ymax = np.amax(linecontour[:,1])
ymin = np.amin(linecontour[:,1])
lw = labelwidth
if (xmax - xmin) > 1.2* lw or (ymax - ymin) > 1.2 * lw:
return 1
else:
return 0
def too_close(self, x,y, lw):
"if there's a label already nearby, find a better place"
if self.labelXYs != []:
dist = [np.sqrt((x-loc[0]) ** 2 + (y-loc[1]) ** 2)
for loc in self.labelXYs]
for d in dist:
if d < 1.2*lw:
return 1
else: return 0
else: return 0
def get_label_coords(self, distances, XX, YY, ysize, lw):
""" labels are ploted at a location with the smallest
dispersion of the contour from a straight line
unless there's another label nearby, in which case
the second best place on the contour is picked up
if there's no good place a label isplotted at the
beginning of the contour
"""
hysize = int(ysize/2)
adist = np.argsort(distances)
for ind in adist:
x, y = XX[ind][hysize], YY[ind][hysize]
if self.too_close(x,y, lw):
continue
else:
return x,y, ind
ind = adist[0]
x, y = XX[ind][hysize], YY[ind][hysize]
return x,y, ind
def get_label_width(self, lev, fmt, fsize):
"get the width of the label in points"
if cbook.is_string_like(lev):
lw = (len(lev)) * fsize
else:
lw = (len(self.get_text(lev,fmt))) * fsize
return lw
def get_real_label_width( self, lev, fmt, fsize ):
"""
This computes actual onscreen label width.
This uses some black magic to determine onscreen extent of non-drawn
label. This magic may not be very robust.
"""
# Find middle of axes
xx = np.mean( np.asarray(self.ax.axis()).reshape(2,2), axis=1 )
# Temporarily create text object
t = text.Text( xx[0], xx[1] )
self.set_label_props( t, self.get_text(lev,fmt), 'k' )
# Some black magic to get onscreen extent
# NOTE: This will only work for already drawn figures, as the canvas
# does not have a renderer otherwise. This is the reason this function
# can't be integrated into the rest of the code.
bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)
# difference in pixel extent of image
lw = np.diff(bbox.corners()[0::2,0])[0]
return lw
def set_label_props(self, label, text, color):
"set the label properties - color, fontsize, text"
label.set_text(text)
label.set_color(color)
label.set_fontproperties(self.labelFontProps)
label.set_clip_box(self.ax.bbox)
def get_text(self, lev, fmt):
"get the text of the label"
if cbook.is_string_like(lev):
return lev
else:
if isinstance(fmt,dict):
return fmt[lev]
else:
return fmt%lev
def locate_label(self, linecontour, labelwidth):
"""find a good place to plot a label (relatively flat
part of the contour) and the angle of rotation for the
text object
"""
nsize= len(linecontour)
if labelwidth > 1:
xsize = int(np.ceil(nsize/labelwidth))
else:
xsize = 1
if xsize == 1:
ysize = nsize
else:
ysize = labelwidth
XX = np.resize(linecontour[:,0],(xsize, ysize))
YY = np.resize(linecontour[:,1],(xsize, ysize))
#I might have fouled up the following:
yfirst = YY[:,0].reshape(xsize, 1)
ylast = YY[:,-1].reshape(xsize, 1)
xfirst = XX[:,0].reshape(xsize, 1)
xlast = XX[:,-1].reshape(xsize, 1)
s = (yfirst-YY) * (xlast-xfirst) - (xfirst-XX) * (ylast-yfirst)
L = np.sqrt((xlast-xfirst)**2+(ylast-yfirst)**2).ravel()
dist = np.add.reduce(([(abs(s)[i]/L[i]) for i in range(xsize)]),-1)
x,y,ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
#print 'ind, x, y', ind, x, y
# There must be a more efficient way...
lc = [tuple(l) for l in linecontour]
dind = lc.index((x,y))
#print 'dind', dind
#dind = list(linecontour).index((x,y))
return x, y, dind
def calc_label_rot_and_inline( self, slc, ind, lw, lc=None, spacing=5 ):
"""
This function calculates the appropriate label rotation given
the linecontour coordinates in screen units, the index of the
label location and the label width.
It will also break contour and calculate inlining if *lc* is
not empty (lc defaults to the empty list if None). *spacing*
is the space around the label in pixels to leave empty.
Do both of these tasks at once to avoid calling mlab.path_length
multiple times, which is relatively costly.
The method used here involves calculating the path length
along the contour in pixel coordinates and then looking
approximately label width / 2 away from central point to
determine rotation and then to break contour if desired.
"""
if lc is None: lc = []
# Half the label width
hlw = lw/2.0
# Check if closed and, if so, rotate contour so label is at edge
closed = mlab.is_closed_polygon(slc)
if closed:
slc = np.r_[ slc[ind:-1], slc[:ind+1] ]
if len(lc): # Rotate lc also if not empty
lc = np.r_[ lc[ind:-1], lc[:ind+1] ]
ind = 0
# Path length in pixel space
pl = mlab.path_length(slc)
pl = pl-pl[ind]
# Use linear interpolation to get points around label
xi = np.array( [ -hlw, hlw ] )
if closed: # Look at end also for closed contours
dp = np.array([pl[-1],0])
else:
dp = np.zeros_like(xi)
ll = mlab.less_simple_linear_interpolation( pl, slc, dp+xi,
extrap=True )
# get vector in pixel space coordinates from one point to other
dd = np.diff( ll, axis=0 ).ravel()
# Get angle of vector - must be calculated in pixel space for
# text rotation to work correctly
if np.all(dd==0): # Must deal with case of zero length label
rotation = 0.0
else:
rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi
# Fix angle so text is never upside-down
if rotation > 90:
rotation = rotation - 180.0
if rotation < -90:
rotation = 180.0 + rotation
# Break contour if desired
nlc = []
if len(lc):
# Expand range by spacing
xi = dp + xi + np.array([-spacing,spacing])
# Get indices near points of interest
I = mlab.less_simple_linear_interpolation(
pl, np.arange(len(pl)), xi, extrap=False )
# If those indices aren't beyond contour edge, find x,y
if (not np.isnan(I[0])) and int(I[0]) != I[0]:
xy1 = mlab.less_simple_linear_interpolation(
pl, lc, [ xi[0] ] )
if (not np.isnan(I[1])) and int(I[1]) != I[1]:
xy2 = mlab.less_simple_linear_interpolation(
pl, lc, [ xi[1] ] )
# Make integer
I = [ np.floor(I[0]), np.ceil(I[1]) ]
# Actually break contours
if closed:
# This will remove contour if shorter than label
if np.all(~np.isnan(I)):
nlc.append( np.r_[ xy2, lc[I[1]:I[0]+1], xy1 ] )
else:
# These will remove pieces of contour if they have length zero
if not np.isnan(I[0]):
nlc.append( np.r_[ lc[:I[0]+1], xy1 ] )
if not np.isnan(I[1]):
nlc.append( np.r_[ xy2, lc[I[1]:] ] )
# The current implementation removes contours completely
# covered by labels. Uncomment line below to keep
        # original contour if this is the preferred behavior.
#if not len(nlc): nlc = [ lc ]
return (rotation,nlc)
def add_label(self,x,y,rotation,lev,cvalue):
dx,dy = self.ax.transData.inverted().transform_point((x,y))
t = text.Text(dx, dy, rotation = rotation,
horizontalalignment='center',
verticalalignment='center')
color = self.labelMappable.to_rgba(cvalue,alpha=self.alpha)
_text = self.get_text(lev,self.labelFmt)
self.set_label_props(t, _text, color)
self.labelTexts.append(t)
self.labelCValues.append(cvalue)
self.labelXYs.append((x,y))
# Add label to plot here - useful for manual mode label selection
self.ax.add_artist(t)
def pop_label(self,index=-1):
'''Defaults to removing last label, but any index can be supplied'''
self.labelCValues.pop(index)
t = self.labelTexts.pop(index)
t.remove()
def labels(self, inline, inline_spacing):
trans = self.ax.transData # A bit of shorthand
for icon, lev, fsize, cvalue in zip(
self.labelIndiceList, self.labelLevelList, self.labelFontSizeList,
self.labelCValueList ):
con = self.collections[icon]
lw = self.get_label_width(lev, self.labelFmt, fsize)
additions = []
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices # Line contour
slc0 = trans.transform(lc) # Line contour in screen coords
# For closed polygons, add extra point to avoid division by
# zero in print_label and locate_label. Other than these
# functions, this is not necessary and should probably be
# eventually removed.
if mlab.is_closed_polygon( lc ):
slc = np.r_[ slc0, slc0[1:2,:] ]
else:
slc = slc0
if self.print_label(slc,lw): # Check if long enough for a label
x,y,ind = self.locate_label(slc, lw)
if inline: lcarg = lc
else: lcarg = None
rotation,new=self.calc_label_rot_and_inline(
slc0, ind, lw, lcarg,
inline_spacing )
# Actually add the label
self.add_label(x,y,rotation,lev,cvalue)
# If inline, add new contours
if inline:
for n in new:
# Add path if not empty or single point
if len(n)>1: additions.append( path.Path(n) )
else: # If not adding label, keep old path
additions.append(linepath)
# After looping over all segments on a contour, remove old
# paths and add new ones if inlining
if inline:
del paths[:]
paths.extend(additions)
class ContourSet(cm.ScalarMappable, ContourLabeler):
"""
Create and store a set of contour lines or filled regions.
User-callable method: clabel
Useful attributes:
ax:
the axes object in which the contours are drawn
collections:
a silent_list of LineCollections or PolyCollections
levels:
contour levels
layers:
same as levels for line contours; half-way between
levels for filled contours. See _process_colors method.
"""
def __init__(self, ax, *args, **kwargs):
"""
Draw contour lines or filled regions, depending on
whether keyword arg 'filled' is False (default) or True.
The first argument of the initializer must be an axes
object. The remaining arguments and keyword arguments
are described in ContourSet.contour_doc.
"""
self.ax = ax
self.levels = kwargs.get('levels', None)
self.filled = kwargs.get('filled', False)
self.linewidths = kwargs.get('linewidths', None)
self.linestyles = kwargs.get('linestyles', 'solid')
self.alpha = kwargs.get('alpha', 1.0)
self.origin = kwargs.get('origin', None)
self.extent = kwargs.get('extent', None)
cmap = kwargs.get('cmap', None)
self.colors = kwargs.get('colors', None)
norm = kwargs.get('norm', None)
self.extend = kwargs.get('extend', 'neither')
self.antialiased = kwargs.get('antialiased', True)
self.nchunk = kwargs.get('nchunk', 0)
self.locator = kwargs.get('locator', None)
if (isinstance(norm, colors.LogNorm)
or isinstance(self.locator, ticker.LogLocator)):
self.logscale = True
if norm is None:
norm = colors.LogNorm()
            if self.extend != 'neither':
raise ValueError('extend kwarg does not work yet with log scale')
else:
self.logscale = False
if self.origin is not None: assert(self.origin in
['lower', 'upper', 'image'])
if self.extent is not None: assert(len(self.extent) == 4)
if cmap is not None: assert(isinstance(cmap, colors.Colormap))
if self.colors is not None and cmap is not None:
raise ValueError('Either colors or cmap must be None')
if self.origin == 'image': self.origin = mpl.rcParams['image.origin']
x, y, z = self._contour_args(*args) # also sets self.levels,
# self.layers
if self.colors is not None:
cmap = colors.ListedColormap(self.colors, N=len(self.layers))
if self.filled:
self.collections = cbook.silent_list('collections.PolyCollection')
else:
self.collections = cbook.silent_list('collections.LineCollection')
# label lists must be initialized here
self.labelTexts = []
self.labelCValues = []
kw = {'cmap': cmap}
if norm is not None:
kw['norm'] = norm
cm.ScalarMappable.__init__(self, **kw) # sets self.cmap;
self._process_colors()
_mask = ma.getmask(z)
if _mask is ma.nomask:
_mask = None
if self.filled:
if self.linewidths is not None:
warnings.warn('linewidths is ignored by contourf')
C = _cntr.Cntr(x, y, z.filled(), _mask)
lowers = self._levels[:-1]
uppers = self._levels[1:]
for level, level_upper in zip(lowers, uppers):
nlist = C.trace(level, level_upper, points = 0,
nchunk = self.nchunk)
col = collections.PolyCollection(nlist,
antialiaseds = (self.antialiased,),
edgecolors= 'none',
alpha=self.alpha)
self.ax.add_collection(col)
self.collections.append(col)
else:
tlinewidths = self._process_linewidths()
self.tlinewidths = tlinewidths
tlinestyles = self._process_linestyles()
C = _cntr.Cntr(x, y, z.filled(), _mask)
for level, width, lstyle in zip(self.levels, tlinewidths, tlinestyles):
nlist = C.trace(level, points = 0)
col = collections.LineCollection(nlist,
linewidths = width,
linestyle = lstyle,
alpha=self.alpha)
if level < 0.0 and self.monochrome:
ls = mpl.rcParams['contour.negative_linestyle']
col.set_linestyle(ls)
col.set_label('_nolegend_')
self.ax.add_collection(col, False)
self.collections.append(col)
self.changed() # set the colors
x0 = ma.minimum(x)
x1 = ma.maximum(x)
y0 = ma.minimum(y)
y1 = ma.maximum(y)
self.ax.update_datalim([(x0,y0), (x1,y1)])
self.ax.autoscale_view()
def changed(self):
tcolors = [ (tuple(rgba),) for rgba in
self.to_rgba(self.cvalues, alpha=self.alpha)]
self.tcolors = tcolors
for color, collection in zip(tcolors, self.collections):
collection.set_alpha(self.alpha)
collection.set_color(color)
for label, cv in zip(self.labelTexts, self.labelCValues):
label.set_alpha(self.alpha)
label.set_color(self.labelMappable.to_rgba(cv))
# add label colors
cm.ScalarMappable.changed(self)
def _autolev(self, z, N):
'''
Select contour levels to span the data.
We need two more levels for filled contours than for
line contours, because for the latter we need to specify
the lower and upper boundary of each range. For example,
a single contour boundary, say at z = 0, requires only
one contour line, but two filled regions, and therefore
three levels to provide boundaries for both regions.
'''
if self.locator is None:
if self.logscale:
self.locator = ticker.LogLocator()
else:
self.locator = ticker.MaxNLocator(N+1)
self.locator.create_dummy_axis()
zmax = self.zmax
zmin = self.zmin
self.locator.set_bounds(zmin, zmax)
lev = self.locator()
zmargin = (zmax - zmin) * 0.000001 # so z < (zmax + zmargin)
if zmax >= lev[-1]:
lev[-1] += zmargin
if zmin <= lev[0]:
if self.logscale:
lev[0] = 0.99 * zmin
else:
lev[0] -= zmargin
self._auto = True
if self.filled:
return lev
return lev[1:-1]
def _initialize_x_y(self, z):
'''
Return X, Y arrays such that contour(Z) will match imshow(Z)
if origin is not None.
The center of pixel Z[i,j] depends on origin:
if origin is None, x = j, y = i;
if origin is 'lower', x = j + 0.5, y = i + 0.5;
if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5
If extent is not None, x and y will be scaled to match,
as in imshow.
If origin is None and extent is not None, then extent
will give the minimum and maximum values of x and y.
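        For example, with a 3x4 *z*, *origin* set to 'lower' and *extent*
        left as None, dx = dy = 1, so x = [0.5, 1.5, 2.5, 3.5] and
        y = [0.5, 1.5, 2.5], matching the pixel centers used by imshow.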
'''
if z.ndim != 2:
raise TypeError("Input must be a 2D array.")
else:
Ny, Nx = z.shape
if self.origin is None: # Not for image-matching.
if self.extent is None:
return np.meshgrid(np.arange(Nx), np.arange(Ny))
else:
x0,x1,y0,y1 = self.extent
x = np.linspace(x0, x1, Nx)
y = np.linspace(y0, y1, Ny)
return np.meshgrid(x, y)
# Match image behavior:
if self.extent is None:
x0,x1,y0,y1 = (0, Nx, 0, Ny)
else:
x0,x1,y0,y1 = self.extent
dx = float(x1 - x0)/Nx
dy = float(y1 - y0)/Ny
x = x0 + (np.arange(Nx) + 0.5) * dx
y = y0 + (np.arange(Ny) + 0.5) * dy
if self.origin == 'upper':
y = y[::-1]
return np.meshgrid(x,y)
def _check_xyz(self, args):
'''
For functions like contour, check that the dimensions
of the input arrays match; if x and y are 1D, convert
them to 2D using meshgrid.
Possible change: I think we should make and use an ArgumentError
Exception class (here and elsewhere).
'''
# We can strip away the x and y units
x = self.ax.convert_xunits( args[0] )
y = self.ax.convert_yunits( args[1] )
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
z = ma.asarray(args[2], dtype=np.float64)
if z.ndim != 2:
raise TypeError("Input z must be a 2D array.")
else: Ny, Nx = z.shape
if x.shape == z.shape and y.shape == z.shape:
return x,y,z
if x.ndim != 1 or y.ndim != 1:
raise TypeError("Inputs x and y must be 1D or 2D.")
nx, = x.shape
ny, = y.shape
if nx != Nx or ny != Ny:
raise TypeError("Length of x must be number of columns in z,\n" +
"and length of y must be number of rows.")
x,y = np.meshgrid(x,y)
return x,y,z
def _contour_args(self, *args):
if self.filled: fn = 'contourf'
else: fn = 'contour'
Nargs = len(args)
if Nargs <= 2:
z = ma.asarray(args[0], dtype=np.float64)
x, y = self._initialize_x_y(z)
elif Nargs <=4:
x,y,z = self._check_xyz(args[:3])
else:
raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn))
self.zmax = ma.maximum(z)
self.zmin = ma.minimum(z)
if self.logscale and self.zmin <= 0:
z = ma.masked_where(z <= 0, z)
warnings.warn('Log scale: values of z <=0 have been masked')
self.zmin = z.min()
self._auto = False
if self.levels is None:
if Nargs == 1 or Nargs == 3:
lev = self._autolev(z, 7)
else: # 2 or 4 args
level_arg = args[-1]
try:
if type(level_arg) == int:
lev = self._autolev(z, level_arg)
else:
lev = np.asarray(level_arg).astype(np.float64)
except:
raise TypeError(
"Last %s arg must give levels; see help(%s)" % (fn,fn))
if self.filled and len(lev) < 2:
raise ValueError("Filled contours require at least 2 levels.")
# Workaround for cntr.c bug wrt masked interior regions:
#if filled:
# z = ma.masked_array(z.filled(-1e38))
# It's not clear this is any better than the original bug.
self.levels = lev
#if self._auto and self.extend in ('both', 'min', 'max'):
# raise TypeError("Auto level selection is inconsistent "
# + "with use of 'extend' kwarg")
self._levels = list(self.levels)
if self.extend in ('both', 'min'):
self._levels.insert(0, min(self.levels[0],self.zmin) - 1)
if self.extend in ('both', 'max'):
self._levels.append(max(self.levels[-1],self.zmax) + 1)
self._levels = np.asarray(self._levels)
self.vmin = np.amin(self.levels) # alternative would be self.layers
self.vmax = np.amax(self.levels)
if self.extend in ('both', 'min'):
self.vmin = 2 * self.levels[0] - self.levels[1]
if self.extend in ('both', 'max'):
self.vmax = 2 * self.levels[-1] - self.levels[-2]
self.layers = self._levels # contour: a line is a thin layer
if self.filled:
self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
if self.extend in ('both', 'min'):
self.layers[0] = 0.5 * (self.vmin + self._levels[1])
if self.extend in ('both', 'max'):
self.layers[-1] = 0.5 * (self.vmax + self._levels[-2])
return (x, y, z)
def _process_colors(self):
"""
Color argument processing for contouring.
Note that we base the color mapping on the contour levels,
not on the actual range of the Z values. This means we
don't have to worry about bad values in Z, and we always have
the full dynamic range available for the selected levels.
The color is based on the midpoint of the layer, except for
        the extended end layers.
"""
self.monochrome = self.cmap.monochrome
if self.colors is not None:
i0, i1 = 0, len(self.layers)
if self.extend in ('both', 'min'):
i0 = -1
if self.extend in ('both', 'max'):
i1 = i1 + 1
self.cvalues = range(i0, i1)
self.set_norm(colors.NoNorm())
else:
self.cvalues = self.layers
if not self.norm.scaled():
self.set_clim(self.vmin, self.vmax)
if self.extend in ('both', 'max', 'min'):
self.norm.clip = False
self.set_array(self.layers)
# self.tcolors are set by the "changed" method
def _process_linewidths(self):
linewidths = self.linewidths
Nlev = len(self.levels)
if linewidths is None:
tlinewidths = [(mpl.rcParams['lines.linewidth'],)] *Nlev
else:
if cbook.iterable(linewidths) and len(linewidths) < Nlev:
linewidths = list(linewidths) * int(np.ceil(Nlev/len(linewidths)))
elif not cbook.iterable(linewidths) and type(linewidths) in [int, float]:
linewidths = [linewidths] * Nlev
tlinewidths = [(w,) for w in linewidths]
return tlinewidths
def _process_linestyles(self):
linestyles = self.linestyles
Nlev = len(self.levels)
if linestyles is None:
tlinestyles = ['solid'] * Nlev
else:
if cbook.is_string_like(linestyles):
tlinestyles = [linestyles] * Nlev
            elif cbook.iterable(linestyles) and len(linestyles) <= Nlev:
                tlinestyles = list(linestyles) * int(np.ceil(Nlev/len(linestyles)))
            else:
                raise ValueError("Unrecognized type or length for linestyles kwarg")
return tlinestyles
def get_alpha(self):
'''returns alpha to be applied to all ContourSet artists'''
return self.alpha
def set_alpha(self, alpha):
'''sets alpha for all ContourSet artists'''
self.alpha = alpha
self.changed()
contour_doc = """
:func:`~matplotlib.pyplot.contour` and
:func:`~matplotlib.pyplot.contourf` draw contour lines and
filled contours, respectively. Except as noted, function
signatures and return values are the same for both versions.
:func:`~matplotlib.pyplot.contourf` differs from the Matlab
(TM) version in that it does not draw the polygon edges,
because the contouring engine yields simply connected regions
with branch cuts. To draw the edges, add line contours with
calls to :func:`~matplotlib.pyplot.contour`.
    Call signatures::
contour(Z)
make a contour plot of an array *Z*. The level values are chosen
automatically.
::
contour(X,Y,Z)
*X*, *Y* specify the (*x*, *y*) coordinates of the surface
::
contour(Z,N)
contour(X,Y,Z,N)
    draw contour lines at *N* automatically-chosen levels.
::
contour(Z,V)
contour(X,Y,Z,V)
draw contour lines at the values specified in sequence *V*
::
contourf(..., V)
fill the (len(*V*)-1) regions between the values in *V*
::
contour(Z, **kwargs)
Use keyword args to control colors, linewidth, origin, cmap ... see
below for more details.
*X*, *Y*, and *Z* must be arrays with the same dimensions.
*Z* may be a masked array, but filled contouring may not
handle internal masked regions correctly.
``C = contour(...)`` returns a
:class:`~matplotlib.contour.ContourSet` object.
Optional keyword arguments:
*colors*: [ None | string | (mpl_colors) ]
If *None*, the colormap specified by cmap will be used.
If a string, like 'r' or 'red', all levels will be plotted in this
color.
If a tuple of matplotlib color args (string, float, rgb, etc),
different levels will be plotted in different colors in the order
specified.
*alpha*: float
The alpha blending value
*cmap*: [ None | Colormap ]
A cm :class:`~matplotlib.cm.Colormap` instance or
*None*. If *cmap* is *None* and *colors* is *None*, a
default Colormap is used.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance for
scaling data values to colors. If *norm* is *None* and
*colors* is *None*, the default linear scaling is used.
*origin*: [ None | 'upper' | 'lower' | 'image' ]
If *None*, the first value of *Z* will correspond to the
lower left corner, location (0,0). If 'image', the rc
value for ``image.origin`` will be used.
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*extent*: [ None | (x0,x1,y0,y1) ]
If *origin* is not *None*, then *extent* is interpreted as
in :func:`matplotlib.pyplot.imshow`: it gives the outer
pixel boundaries. In this case, the position of Z[0,0]
is the center of the pixel, not a corner. If *origin* is
*None*, then (*x0*, *y0*) is the position of Z[0,0], and
(*x1*, *y1*) is the position of Z[-1,-1].
This keyword is not active if *X* and *Y* are specified in
the call to contour.
*locator*: [ None | ticker.Locator subclass ]
If *locator* is None, the default
:class:`~matplotlib.ticker.MaxNLocator` is used. The
locator is used to determine the contour levels if they
are not given explicitly via the *V* argument.
*extend*: [ 'neither' | 'both' | 'min' | 'max' ]
Unless this is 'neither', contour levels are automatically
added to one or both ends of the range so that all data
are included. These added ranges are then mapped to the
special colormap values which default to the ends of the
colormap range, but can be set via
:meth:`matplotlib.cm.Colormap.set_under` and
:meth:`matplotlib.cm.Colormap.set_over` methods.
contour-only keyword arguments:
*linewidths*: [ None | number | tuple of numbers ]
If *linewidths* is *None*, the default width in
``lines.linewidth`` in ``matplotlibrc`` is used.
If a number, all levels will be plotted with this linewidth.
If a tuple, different levels will be plotted with different
linewidths in the order specified
*linestyles*: [None | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
      If *linestyles* is *None*, 'solid' is used.
*linestyles* can also be an iterable of the above strings
specifying a set of linestyles to be used. If this
iterable is shorter than the number of contour levels
it will be repeated as necessary.
If contour is using a monochrome colormap and the contour
level is less than 0, then the linestyle specified
in ``contour.negative_linestyle`` in ``matplotlibrc``
will be used.
contourf-only keyword arguments:
*antialiased*: [ True | False ]
enable antialiasing
*nchunk*: [ 0 | integer ]
If 0, no subdivision of the domain. Specify a positive integer to
divide the domain into subdomains of roughly *nchunk* by *nchunk*
points. This may never actually be advantageous, so this option may
be removed. Chunking introduces artifacts at the chunk boundaries
unless *antialiased* is *False*.
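    A minimal usage sketch (the array names below are only illustrative)::
      import numpy as np
      import matplotlib.pyplot as plt
      y, x = np.mgrid[-3:3:100j, -3:3:100j]
      z = np.exp(-(x**2 + y**2))
      cs = plt.contour(x, y, z, 6, colors='k')
      plt.clabel(cs, inline=1, fontsize=10)
      plt.show()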
**Example:**
.. plot:: mpl_examples/pylab_examples/contour_demo.py
"""
def find_nearest_contour( self, x, y, indices=None, pixel=True ):
"""
Finds contour that is closest to a point. Defaults to
measuring distance in pixels (screen space - useful for manual
contour labeling), but this can be controlled via a keyword
argument.
Returns a tuple containing the contour, segment, index of
segment, x & y of segment point and distance to minimum point.
Call signature::
          conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(
                     x, y, indices=None, pixel=True )
Optional keyword arguments::
*indices*:
Indexes of contour levels to consider when looking for
nearest point. Defaults to using all levels.
*pixel*:
If *True*, measure distance in pixel space, if not, measure
distance in axes space. Defaults to *True*.
"""
# This function uses a method that is probably quite
# inefficient based on converting each contour segment to
# pixel coordinates and then comparing the given point to
# those coordinates for each contour. This will probably be
# quite slow for complex contours, but for normal use it works
# sufficiently well that the time is not noticeable.
# Nonetheless, improvements could probably be made.
        if indices is None:
indices = range(len(self.levels))
dmin = 1e10
conmin = None
segmin = None
xmin = None
ymin = None
for icon in indices:
con = self.collections[icon]
paths = con.get_paths()
for segNum, linepath in enumerate(paths):
lc = linepath.vertices
# transfer all data points to screen coordinates if desired
if pixel:
lc = self.ax.transData.transform(lc)
ds = (lc[:,0]-x)**2 + (lc[:,1]-y)**2
d = min( ds )
if d < dmin:
dmin = d
conmin = icon
segmin = segNum
imin = mpl.mlab.find( ds == d )[0]
xmin = lc[imin,0]
ymin = lc[imin,1]
return (conmin,segmin,imin,xmin,ymin,dmin)
| agpl-3.0 |
wolfiex/DSMACC-testing | begin.py | 1 | 5376 | #!/usr/local/anaconda/bin/python
#/usr/bin/python
import multiprocessing,os,subprocess,pickle,sys,time
#import pandas as pd
import numpy as np
import glob,sys,os
# options
available_cores = 16
pre_formatted_style=False #slower
runsaved = False
if '--saved' in sys.argv: runsaved=True #runs model using saved versions when -modelname is provided
keepx = False
if '--keepx' in sys.argv: keepx=True # does not ignore runs marked with an X in the beginning
icsonly = False
if '--icsonly' in sys.argv: icsonly=True # only generate the Init_cons.dat file, then exit before running the model
obs= 0
if '--obs' in sys.argv:
    obs=int(tuple(open('include.obs'))[0].strip().replace('!obs:','')) # number of observation lines declared on the first line of include.obs
print 'running with %s observations, with %s lines'%((obs-1.)/4.,obs)
last = False
if '--last' in sys.argv: last = -1
if '--run' in sys.argv:
for i in sys.argv:
try:
last=int(i)
except: None
print 'if obs unconstrain species where obs is used'
print sys.argv,last
##################
####read files####
file_list = glob.glob('InitCons/*.csv')
file_list.sort(key=os.path.getmtime)#getmtime - modified getctime-created
if (not os.path.exists('./model')) and (not runsaved): sys.exit('No model file found. Please run "make kpp" followed by "make"')
print 'Select file to open: \n\n'
for i,f in enumerate(file_list): print i , ' - ', f.replace('InitCons/','').replace('.csv','')
if (last) : ic_file = file_list[last]
else : ic_file = file_list[int(raw_input('Enter Number \n'))]
#run simulations
#########################################################################################
ncores= available_cores # - 6 #if 6 used from openmp?
start = (time.strftime("%Y%m%d%H%M"))
# make ics
if ('.csv' not in ic_file): ic_file += '.csv'
print ic_file
os.system("touch Init_cons.dat")
os.system("rm Init_cons.dat")
os.system("./InitCons/makeics.pl %s"%ic_file)
if icsonly: sys.exit('icsonly flag is on, Initi_cons.dat file made - only. Exiting.')
import netCDF4
from netCDF4 import Dataset
from scipy.io import FortranFile
ic_open= tuple(open(ic_file))
numbered = np.array([i for i in enumerate(ic_open[2].strip().split(',')[3:])])
if not keepx: numbered = filter(lambda x: x[1][0] not in 'xX', numbered)
n_runs=len(numbered)
if (ncores > n_runs): ncores = n_runs
def read_fbin(filename):
    ''' this reads each written binary instance iteratively'''
f = FortranFile(filename, 'r')
array = []
while True:
try:
array.append(f.read_reals(dtype=np.float_))
except TypeError:
break
#array = np.reshape(array, (nspecs,-1))
f.close()
return array
# run dsmacc
def simulate (arg_in):
try: #each system call has to be on a new line
#os.system("touch Outputs/s_%s.empty"%('run',arg_in[1]))
#os.system("rm Outputs/s_%s.*"%('run',arg_in[1]))
start = time.strftime("%s")
description="%s_%s"%('run',arg_in[1])
model='model'
if '-' in description:
if runsaved: model='save/exec/%s/model'%(description.split('-')[-1])
else: description = description.split('-')[0]
print 'aaaa'
linenumber = "%s"%(int(arg_in[0])+1)
run ='./%s %s %s %d'%(model, description,int(linenumber),obs)
print run ; os.system(run)
return int(time.strftime("%s")) - int(start)
except Exception as e:
return 'Failed on '+ arg_in + e
#do runs
#########################################################################################
out = multiprocessing.Pool(ncores).map( simulate , numbered )
os.system('rm fort*')
#concatenate results
#########################################################################################
print '\n Calculations complete! \n Concatenating results. \n '
ic_string='' # get a string format of the intial conditions file
for line in ic_open[1:]: ic_string+=line
filename= ic_file.split('/')[-1].split('.')[0]+'_'+ time.strftime("%y%m%d%H%M")+'.nc'
ncfile = Dataset(filename,'w')
print 'add to results folder'
ncfile.initial_conditions_str = ic_string
ncfile.date = time.strftime("Completion time:%A %d %B %Y at %H:%M")
ncfile.description = ic_open[0].strip()
spec = ncfile.createDimension('spec', None)
time_dim = ncfile.createDimension('time', None) # renamed so the time module is not shadowed
rate = ncfile.createDimension('rate', None)
#for each simulation
l = 0
for group_name in numbered:
print group_name
if not runsaved: group_name[1] = group_name[1].split('-')[0]
group = ncfile.createGroup(group_name[1])
specvar = group.createVariable( 'Spec' , "f8" ,('time','spec',))
ratevar = group.createVariable( 'Rate' , "f8" ,('time','rate',))
specvar[:] = read_fbin('./Outputs/run_%s_.spec'%group_name[1])
ratevar[:] = read_fbin('./Outputs/run_%s_.rate'%group_name[1])
l += len(specvar[:])
print group
specvar.head = ''.join(tuple(open('./Outputs/spec.names'))).replace(' ','').replace('\n','')
ratevar.head = ''.join(tuple(open('./Outputs/rate.names'))).replace(' ','').replace('\n','')
group.WALL_time = out[int(group_name[0])]
# close the file.
ncfile.close()
if l <= 1 :
os.system('rm '+filename)
print 'Failed!'
else:
print '*** SUCCESS writing %s!'%filename
os.system('find . -size +100M | cat >> .gitignore')
| gpl-3.0 |
ClimbsRocks/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 161 | 1380 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
LodewijkSikkel/paparazzi | sw/misc/attitude_reference/att_ref_gui.py | 49 | 12483 | #!/usr/bin/env python
#
# Copyright (C) 2014 Antoine Drouin
#
# This file is part of paparazzi.
#
# paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
"""
This is a graphical user interface for playing with reference attitude
"""
# https://gist.github.com/zed/b966b5a04f2dfc16c98e
# https://gist.github.com/nzjrs/51686
# http://jakevdp.github.io/blog/2012/10/07/xkcd-style-plots-in-matplotlib/
# http://chimera.labs.oreilly.com/books/1230000000393/ch12.html#_problem_208 <- threads
# TODO:
# -cancel workers
#
#
#
from __future__ import print_function
from gi.repository import Gtk, GObject
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
import matplotlib.font_manager as fm
import math, threading, numpy as np, scipy.signal, pdb, copy, logging
import pat.utils as pu
import pat.algebra as pa
import control as ctl
import gui
class Reference(gui.Worker):
def __init__(self, sp, ref_impl=ctl.att_ref_default, omega=6., xi=0.8, max_vel=pu.rad_of_deg(100),
max_accel=pu.rad_of_deg(500)):
gui.Worker.__init__(self)
self.impl = ref_impl()
self.sp = sp
self.reset_outputs(sp)
self.update(sp, ref_impl, omega, xi, max_vel, max_accel)
self.do_work = True
def reset_outputs(self, sp):
self.euler = np.zeros((len(sp.time), pa.e_size))
self.quat = np.zeros((len(sp.time), pa.q_size))
self.vel = np.zeros((len(sp.time), pa.r_size))
self.accel = np.zeros((len(sp.time), pa.r_size))
def update_type(self, _type):
#print('update_type', _type)
self.impl = _type()
self.do_work = True
#self.recompute()
def update_param(self, p, v):
#print('update_param', p, v)
self.impl.set_param(p, v)
self.do_work = True
#self.recompute()
def update_sp(self, sp, ref_impl=None, omega=None, xi=None, max_vel=None, max_accel=None):
self.reset_outputs(sp)
self.update(sp, ref_impl, omega, xi, max_vel, max_accel)
self.do_work = True
#self.recompute()
def update(self, sp, ref_impl=None, omega=None, xi=None, max_vel=None, max_accel=None):
self.sp = sp
if ref_impl is not None:
self.impl = ref_impl()
if omega is not None:
self.impl.set_param('omega', omega)
if xi is not None:
self.impl.set_param('xi', xi)
if max_vel is not None:
self.impl.set_param('max_vel', max_vel)
if max_accel is not None:
self.impl.set_param('max_accel', max_accel)
def recompute(self):
#print("recomputing...")
self.start((self.sp,))
def _work_init(self, sp):
#print('_work_init ', self, self.impl, sp, sp.dt)
self.euler = np.zeros((len(sp.time), pa.e_size))
self.quat = np.zeros((len(sp.time), pa.q_size))
self.vel = np.zeros((len(sp.time), pa.r_size))
self.accel = np.zeros((len(sp.time), pa.r_size))
euler0 = [0.3, 0.1, 0.2]
self.impl.set_euler(np.array(euler0))
self.quat[0], self.euler[0], self.vel[0], self.accel[0] = self.impl.quat, self.impl.euler, self.impl.vel, self.impl.accel
self.n_iter_per_step = float(len(sp.time)) / self.n_step
def _work_step(self, i, sp):
start, stop = int(i * self.n_iter_per_step), int((i + 1) * self.n_iter_per_step)
# print('_work_step of %s: i %i, start %i, stop %i' % (self.impl, i, start, stop))
for j in range(start, stop):
self.impl.update_quat(sp.quat[j], sp.dt)
self.quat[j], self.vel[j], self.accel[j] = self.impl.quat, self.impl.vel, self.impl.accel
self.euler[j] = pa.euler_of_quat(self.quat[j])
class Setpoint(object):
t_static, t_step_phi, t_step_theta, t_step_psi, t_step_random, t_nb = range(0, 6)
t_names = ["constant", "step phi", "step theta", "step psi", "step_random"]
def __init__(self, type=t_static, duration=10., step_duration=5., step_ampl=pu.rad_of_deg(10.)):
self.dt = 1. / 512
self.update(type, duration, step_duration, step_ampl)
def update(self, type, duration, step_duration, step_ampl):
self.type = type
self.duration, self.step_duration, self.step_ampl = duration, step_duration, step_ampl
self.time = np.arange(0., self.duration, self.dt)
self.euler = np.zeros((len(self.time), pa.e_size))
try:
i = [Setpoint.t_step_phi, Setpoint.t_step_theta, Setpoint.t_step_psi].index(self.type)
self.euler[:, i] = step_ampl / 2 * scipy.signal.square(math.pi / step_duration * self.time)
except Exception as e:
print(e)
pass
self.quat = np.zeros((len(self.time), pa.q_size))
for i in range(0, len(self.time)):
self.quat[i] = pa.quat_of_euler(self.euler[i])
class GUI(object):
def __init__(self, sp, refs):
self.b = Gtk.Builder()
self.b.add_from_file("ressources/att_ref_gui.xml")
w = self.b.get_object("window")
w.connect("delete-event", Gtk.main_quit)
mb = self.b.get_object("main_vbox")
self.plot = Plot(sp, refs)
mb.pack_start(self.plot, True, True, 0)
mb = self.b.get_object("main_hbox")
ref_classes = [ctl.att_ref_default, ctl.att_ref_sat_naive, ctl.att_ref_sat_nested, ctl.att_ref_sat_nested2,
ctl.AttRefFloatNative, ctl.AttRefIntNative]
self.ref_views = [gui.AttRefParamView('<b>Ref {}</b>'.format(i+1), ref_classes=ref_classes,
active_impl=r.impl) for i, r in enumerate(refs)]
for r in self.ref_views:
mb.pack_start(r, True, True, 0)
w.show_all()
class Plot(Gtk.Frame):
def __init__(self, sp, refs):
Gtk.Frame.__init__(self)
self.f = Figure()
self.canvas = FigureCanvas(self.f)
self.add(self.canvas)
self.set_size_request(1024, 600)
self.f.subplots_adjust(left=0.07, right=0.98, bottom=0.05, top=0.95,
hspace=0.2, wspace=0.2)
# self.buffer = self.canvas.get_snapshot()
def decorate(self, axis, title=None, ylab=None, legend=None):
# font_prop = fm.FontProperties(fname='Humor-Sans-1.0.ttf', size=14)
if title is not None:
axis.set_title(title) # , fontproperties=font_prop)
if ylab is not None:
axis.yaxis.set_label_text(ylab) # , fontproperties=font_prop)
if legend is not None:
axis.legend(legend) # , prop=font_prop)
axis.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
axis.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
def update(self, sp, refs):
title = [r'$\phi$', r'$\theta$', r'$\psi$']
legend = ['Ref1', 'Ref2', 'Setpoint']
for i in range(0, 3):
axis = self.f.add_subplot(331 + i)
axis.clear()
for ref in refs:
axis.plot(sp.time, pu.deg_of_rad(ref.euler[:, i]))
axis.plot(sp.time, pu.deg_of_rad(sp.euler[:, i]))
self.decorate(axis, title[i], *(('deg', legend) if i == 0 else (None, None)))
title = [r'$p$', r'$q$', r'$r$']
for i in range(0, 3):
axis = self.f.add_subplot(334 + i)
axis.clear()
for ref in refs:
axis.plot(sp.time, pu.deg_of_rad(ref.vel[:, i]))
self.decorate(axis, title[i], 'deg/s' if i == 0 else None)
title = [r'$\dot{p}$', r'$\dot{q}$', r'$\dot{r}$']
for i in range(0, 3):
axis = self.f.add_subplot(337 + i)
axis.clear()
for ref in refs:
axis.plot(sp.time, pu.deg_of_rad(ref.accel[:, i]))
self.decorate(axis, title[i], 'deg/s2' if i == 0 else None)
self.canvas.draw()
class Application(object):
def __init__(self):
self.sp = Setpoint()
self.refs = [Reference(self.sp), Reference(self.sp, ref_impl=ctl.AttRefFloatNative)]
for nref, r in enumerate(self.refs):
r.connect("progress", self.on_ref_update_progress, nref + 1)
r.connect("completed", self.on_ref_update_completed, nref + 1)
self.gui = GUI(self.sp, self.refs)
self.register_gui()
self.recompute_sequentially()
def on_ref_update_progress(self, ref, v, nref):
#print('progress', nref, v)
self.gui.ref_views[nref - 1].progress.set_fraction(v)
def on_ref_update_completed(self, ref, nref):
#print('on_ref_update_completed', ref, nref)
self.gui.ref_views[nref - 1].progress.set_fraction(1.0)
# recompute remaining refs (if any)
self.recompute_sequentially()
self.gui.plot.update(self.sp, self.refs)
def register_gui(self):
self.register_setpoint()
for i in range(0, 2):
self.gui.ref_views[i].connect(self._on_ref_changed, self._on_ref_param_changed, self.refs[i], self.gui.ref_views[i])
self.gui.ref_views[i].update_view(self.refs[i].impl)
def register_setpoint(self):
b = self.gui.b
c_sp_type = b.get_object("combo_sp_type")
for n in Setpoint.t_names:
c_sp_type.append_text(n)
c_sp_type.set_active(self.sp.type)
c_sp_type.connect("changed", self.on_sp_changed)
names = ["spin_sp_duration", "spin_sp_step_duration", "spin_sp_step_amplitude"]
widgets = [b.get_object(name) for name in names]
adjs = [Gtk.Adjustment(self.sp.duration, 1, 100, 1, 10, 0),
Gtk.Adjustment(self.sp.step_duration, 0.1, 10., 0.1, 1., 0),
Gtk.Adjustment(pu.deg_of_rad(self.sp.step_ampl), 0.1, 180., 1, 10., 0)]
for i, w in enumerate(widgets):
w.set_adjustment(adjs[i])
w.update()
w.connect("value-changed", self.on_sp_changed)
def recompute_sequentially(self):
"""
Somehow running two threads to update both references at the same time produces bogus data..
As a workaround we simply run them one after the other.
"""
for r in self.refs:
if r.running:
return
for r in self.refs:
if r.do_work:
r.recompute()
return
def on_sp_changed(self, widget):
b = self.gui.b
_type = b.get_object("combo_sp_type").get_active()
names = ["spin_sp_duration", "spin_sp_step_duration", "spin_sp_step_amplitude"]
_duration, _step_duration, _step_amplitude = [b.get_object(name).get_value() for name in names]
#print('_on_sp_changed', _type, _duration, _step_duration, _step_amplitude)
_step_amplitude = pu.rad_of_deg(_step_amplitude)
self.sp.update(_type, _duration, _step_duration, _step_amplitude)
# somehow running two threads to update both references at the same time produces bogus data..
# as a workaround we simply run them one after the other
for r in self.refs:
r.update_sp(self.sp)
#r.recompute()
self.recompute_sequentially()
def _on_ref_changed(self, widget, ref, view):
#print('_on_ref_changed', widget, ref, view)
ref.update_type(view.get_selected_ref_class())
view.update_ref_params(ref.impl)
self.recompute_sequentially()
def _on_ref_param_changed(self, widget, p, ref, view):
#print("_on_ref_param_changed: %s %s=%s" % (ref.impl.name, p, val))
val = view.spin_cfg[p]['d2r'](widget.get_value())
ref.update_param(p, val)
self.recompute_sequentially()
def run(self):
Gtk.main()
if __name__ == "__main__":
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
Application().run()
| gpl-2.0 |
miaecle/deepchem | deepchem/utils/test/test_evaluate.py | 1 | 11874 | """Unit tests for evaluators."""
import deepchem as dc
import numpy as np
import unittest
import sklearn
from deepchem.utils.evaluate import Evaluator
from deepchem.utils.evaluate import GeneratorEvaluator
def test_multiclass_threshold_predictions():
"""Check prediction thresholding works correctly."""
# Construct a random class probability matrix
y = np.random.rand(10, 5)
y_sums = np.sum(y, axis=1)
y = y / y_sums[:, None]
y_out = dc.metrics.threshold_predictions(y)
assert y_out.shape == (10,)
assert np.allclose(y_out, np.argmax(y, axis=1))
def test_binary_threshold_predictions():
"""Check prediction thresholding works correctly."""
# Construct a random class probability matrix
y = np.random.rand(10, 2)
y_sums = np.sum(y, axis=1)
y = y / y_sums[:, None]
y_out = dc.metrics.threshold_predictions(y, threshold=0.3)
assert y_out.shape == (10,)
assert np.allclose(y_out, np.where(y[:, 1] >= 0.3, np.ones(10), np.zeros(10)))
def test_evaluator_dc_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
def test_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskClassifier(1, 5, n_classes=5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.roc_auc_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_sklearn_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
rf = sklearn.ensemble.RandomForestClassifier(50)
model = dc.models.SklearnModel(rf)
model.fit(dataset)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.roc_auc_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_evaluate_multiclass_classification_singletask():
"""Test multiclass classification evaluation."""
X = np.random.rand(100, 5)
y = np.random.randint(5, size=(100,))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskClassifier(1, 5, n_classes=5)
multitask_scores = model.evaluate(
dataset, dc.metrics.roc_auc_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_multitask_evaluator():
"""Test evaluation of a multitask metric."""
n_tasks = 2
X = np.random.rand(10, 5)
y = np.random.rand(10, 2, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
evaluator = Evaluator(model, dataset, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores, all_task_scores = evaluator.compute_model_performance(
metric, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
assert isinstance(all_task_scores, dict)
assert len(multitask_scores) == 1
def test_model_evaluate_dc_metric():
"""Test a model evaluate on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = model.evaluate(dataset, metric, [])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
def test_multitask_model_evaluate_sklearn():
"""Test evaluation of a multitask metric."""
n_tasks = 2
X = np.random.rand(10, 5)
y = np.random.rand(10, 2)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores, all_task_scores = evaluator.compute_model_performance(
dc.metrics.mean_absolute_error, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['metric-1'] > 0
assert isinstance(all_task_scores, dict)
assert len(multitask_scores) == 1
def test_multitask_model_evaluate():
"""Test evaluation of a multitask metric."""
n_tasks = 2
X = np.random.rand(10, 5)
y = np.random.rand(10, 2)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(2, 5)
multitask_scores, all_task_scores = model.evaluate(
dataset, dc.metrics.mean_absolute_error, per_task_metrics=True)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] > 0
assert isinstance(all_task_scores, dict)
def test_evaluator_dc_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
metric1 = dc.metrics.Metric(dc.metrics.mae_score, n_tasks=2)
metric2 = dc.metrics.Metric(dc.metrics.r2_score, n_tasks=2)
multitask_scores = evaluator.compute_model_performance([metric1, metric2])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 2
assert multitask_scores['mae_score'] > 0
assert "r2_score" in multitask_scores
def test_model_evaluate_dc_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
metric1 = dc.metrics.Metric(dc.metrics.mae_score)
metric2 = dc.metrics.Metric(dc.metrics.r2_score)
multitask_scores = model.evaluate(dataset, [metric1, metric2])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 2
assert multitask_scores['mae_score'] > 0
assert "r2_score" in multitask_scores
def test_generator_evaluator_dc_metric_multitask_single_point():
"""Test generator evaluator on a generator."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
generator = model.default_generator(dataset, pad_batches=False)
evaluator = GeneratorEvaluator(model, generator, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
assert len(multitask_scores) == 1
def test_evaluator_sklearn_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.mean_absolute_error)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
  # Note that since no name was provided, metrics are indexed by the order
  # given.
assert multitask_scores['metric-1'] > 0
def test_generator_evaluator_dc_metric_multitask():
"""Test generator evaluator on a generator."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
generator = model.default_generator(dataset, pad_batches=False)
evaluator = GeneratorEvaluator(model, generator, [])
metric = dc.metrics.Metric(dc.metrics.mae_score)
multitask_scores = evaluator.compute_model_performance(metric)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
assert multitask_scores['mae_score'] > 0
def test_model_evaluate_sklearn_metric():
"""Test a model evaluate on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
multitask_scores = model.evaluate(dataset, dc.metrics.mean_absolute_error)
assert isinstance(multitask_scores, dict)
assert len(multitask_scores) == 1
  # Note that since no name was provided, metrics are indexed by the order
  # given.
assert multitask_scores['metric-1'] > 0
def test_evaluator_sklearn_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
[dc.metrics.mean_absolute_error, dc.metrics.r2_score])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores.keys()) == 2
  # Note that since no name was provided, metrics are indexed by the order
  # given.
assert multitask_scores['metric-1'] > 0
assert "metric-2" in multitask_scores
def test_model_evaluate_sklearn_multi_metric():
"""Test an evaluator on a dataset."""
X = np.random.rand(10, 5)
y = np.random.rand(10, 1)
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.MultitaskRegressor(1, 5)
multitask_scores = model.evaluate(
dataset, [dc.metrics.mean_absolute_error, dc.metrics.r2_score])
assert isinstance(multitask_scores, dict)
assert len(multitask_scores.keys()) == 2
  # Note that since no name was provided, metrics are indexed by the order
  # given.
assert multitask_scores['metric-1'] > 0
assert "metric-2" in multitask_scores
def test_gc_binary_classification():
"""Test multiclass classification evaluation."""
smiles = ["C", "CC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(2, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification")
# TODO: Fix this case with correct thresholding
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.accuracy_score, n_classes=2)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
def test_gc_binary_kappa_classification():
"""Test multiclass classification evaluation."""
np.random.seed(1234)
smiles = ["C", "CC", "CO", "CCC", "CCCC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(2, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification")
# TODO: Fix this case with correct thresholding
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.kappa_score, n_classes=2)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] <= 1
assert multitask_scores["metric-1"] >= -1
def test_gc_multiclass_classification():
"""Test multiclass classification evaluation."""
np.random.seed(1234)
smiles = ["C", "CC"]
featurizer = dc.feat.ConvMolFeaturizer()
X = featurizer.featurize(smiles)
y = np.random.randint(5, size=(len(smiles),))
dataset = dc.data.NumpyDataset(X, y)
model = dc.models.GraphConvModel(1, mode="classification", n_classes=5)
evaluator = Evaluator(model, dataset, [])
multitask_scores = evaluator.compute_model_performance(
dc.metrics.accuracy_score, n_classes=5)
assert len(multitask_scores) == 1
assert multitask_scores["metric-1"] >= 0
| mit |
aattaran/Machine-Learning-with-Python | naive_bayes/GaussianNB Deployment on Terrain Data/main.py | 2 | 1533 | #!/usr/bin/python
""" Complete the code in ClassifyNB.py with the sklearn
Naive Bayes classifier to classify the terrain data.
The objective of this exercise is to recreate the decision
boundary found in the lesson video, and make a plot that
visually shows the decision boundary """
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture, output_image
from ClassifyNB import classify
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
# You will need to complete this function imported from the ClassifyNB script.
# Be sure to change to that code tab to complete this quiz.
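# A minimal sketch of what classify() in ClassifyNB.py might look like
# (illustrative only -- the actual quiz solution lives in that file):
#
#   from sklearn.naive_bayes import GaussianNB
#   def classify(features_train, labels_train):
#       clf = GaussianNB()
#       clf.fit(features_train, labels_train)
#       return clf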
clf = classify(features_train, labels_train)
### draw the decision boundary with the test points overlaid
prettyPicture(clf, features_test, labels_test)
output_image("test.png", "png", open("test.png", "rb").read())
| bsd-3-clause |
xiaoxq/apollo | modules/tools/mapshow/libs/subplot_traj_speed.py | 3 | 3099 | #!/usr/bin/env python3
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from matplotlib import cm as cmx
from matplotlib import colors as mcolors
import matplotlib.pyplot as plt
class TrajSpeedSubplot:
def __init__(self, ax):
self.ax = ax
self.speed_lines = []
self.speed_lines_size = 30
self.colors = []
self.init_colors()
# self.colors = ['b','r', 'y', 'k']
for i in range(self.speed_lines_size):
line, = ax.plot(
[0], [0],
c=self.colors[i % len(self.colors)],
ls="-",
marker='',
lw=3,
alpha=0.8)
self.speed_lines.append(line)
ax.set_xlabel("t (second)")
# ax.set_xlim([-2, 10])
ax.set_ylim([-1, 25])
self.ax.autoscale_view()
# self.ax.relim()
ax.set_ylabel("speed (m/s)")
ax.set_title("PLANNING SPEED")
self.set_visible(False)
def init_colors(self):
self.colors = []
values = list(range(self.speed_lines_size))
jet = plt.get_cmap('brg')
color_norm = mcolors.Normalize(vmin=0, vmax=values[-1])
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap=jet)
for val in values:
color_val = scalar_map.to_rgba(val)
self.colors.append(color_val)
def set_visible(self, visible):
for line in self.speed_lines:
line.set_visible(visible)
def show(self, planning):
planning.traj_data_lock.acquire()
for i in range(len(planning.traj_speed_t_history)):
if i >= self.speed_lines_size:
print("WARNING: number of path lines is more than "
+ str(self.speed_lines_size))
continue
speed_line = self.speed_lines[self.speed_lines_size - i - 1]
speed_line.set_xdata(planning.traj_speed_t_history[i])
speed_line.set_ydata(planning.traj_speed_v_history[i])
# speed_line.set_xdata([1,2,3,4])
# speed_line.set_ydata([1,2,3,4])
# speed_line.set_label(name[0:5])
speed_line.set_visible(True)
# self.ax.legend(loc="upper left", borderaxespad=0., ncol=5)
# self.ax.axis('equal')
planning.traj_data_lock.release()
self.ax.autoscale_view()
self.ax.relim()
| apache-2.0 |
Liam3851/fredapi | fredapi/fred.py | 1 | 19208 |
import os
import sys
import xml.etree.ElementTree as ET
if sys.version_info[0] >= 3:
import urllib.request as url_request
import urllib.parse as url_parse
import urllib.error as url_error
else:
import urllib2 as url_request
import urllib as url_parse
import urllib2 as url_error
import pandas as pd
urlopen = url_request.urlopen
quote_plus = url_parse.quote_plus
urlencode = url_parse.urlencode
HTTPError = url_error.HTTPError
class Fred(object):
earliest_realtime_start = '1776-07-04'
latest_realtime_end = '9999-12-31'
nan_char = '.'
max_results_per_request = 1000
root_url = 'https://api.stlouisfed.org/fred'
def __init__(self,
api_key=None,
api_key_file=None):
"""
Initialize the Fred class that provides useful functions to query the Fred dataset. You need to specify a valid
API key in one of 3 ways: pass the string via api_key, or set api_key_file to a file with the api key in the
first line, or set the environment variable 'FRED_API_KEY' to the value of your api key. You can sign up for a
free api key on the Fred website at http://research.stlouisfed.org/fred2/
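        A minimal usage sketch (the key and file path below are placeholders)::
            fred = Fred(api_key='0123456789abcdef0123456789abcdef')
            fred = Fred(api_key_file='/path/to/fred_api_key.txt')
            # or: export FRED_API_KEY=... in the environment and call Fred()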
"""
self.api_key = None
if api_key is not None:
self.api_key = api_key
elif api_key_file is not None:
f = open(api_key_file, 'r')
self.api_key = f.readline().strip()
f.close()
else:
self.api_key = os.environ.get('FRED_API_KEY')
if self.api_key is None:
import textwrap
raise ValueError(textwrap.dedent("""\
You need to set a valid API key. You can set it in 3 ways:
pass the string with api_key, or set api_key_file to a
file with the api key in the first line, or set the
environment variable 'FRED_API_KEY' to the value of your
api key. You can sign up for a free api key on the Fred
website at http://research.stlouisfed.org/fred2/"""))
def __fetch_data(self, url):
"""
helper function for fetching data given a request URL
"""
url += '&api_key=' + self.api_key
try:
response = urlopen(url)
root = ET.fromstring(response.read())
except HTTPError as exc:
root = ET.fromstring(exc.read())
raise ValueError(root.get('message'))
return root
def _parse(self, date_str, format='%Y-%m-%d'):
"""
helper function for parsing FRED date string into datetime
"""
rv = pd.to_datetime(date_str, format=format)
if hasattr(rv, 'to_pydatetime'):
rv = rv.to_pydatetime()
return rv
def get_series_info(self, series_id):
"""
Get information about a series such as its title, frequency, observation start/end dates, units, notes, etc.
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
Returns
-------
info : Series
a pandas Series containing information about the Fred series
"""
url = "%s/series?series_id=%s" % (self.root_url, series_id)
root = self.__fetch_data(url)
if root is None or not len(root):
raise ValueError('No info exists for series id: ' + series_id)
info = pd.Series(root.getchildren()[0].attrib)
return info
def get_series(self, series_id, observation_start=None, observation_end=None, **kwargs):
"""
Get data for a Fred series id. This fetches the latest known data, and is equivalent to get_series_latest_release()
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
observation_start : datetime or datetime-like str such as '7/1/2014', optional
earliest observation date
observation_end : datetime or datetime-like str such as '7/1/2014', optional
latest observation date
kwargs : additional parameters
Any additional parameters supported by FRED. You can see https://api.stlouisfed.org/docs/fred/series_observations.html for the full list
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series
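        Examples
        --------
        A minimal sketch, assuming ``fred`` is a Fred instance with a valid key::
            cpi = fred.get_series('CPIAUCSL', observation_start='2000-01-01')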
"""
url = "%s/series/observations?series_id=%s" % (self.root_url, series_id)
if observation_start is not None:
observation_start = pd.to_datetime(observation_start,
errors='raise')
url += '&observation_start=' + observation_start.strftime('%Y-%m-%d')
if observation_end is not None:
observation_end = pd.to_datetime(observation_end, errors='raise')
url += '&observation_end=' + observation_end.strftime('%Y-%m-%d')
if kwargs.keys():
url += '&' + urlencode(kwargs)
root = self.__fetch_data(url)
if root is None:
raise ValueError('No data exists for series id: ' + series_id)
data = {}
for child in root.getchildren():
val = child.get('value')
if val == self.nan_char:
val = float('NaN')
else:
val = float(val)
data[self._parse(child.get('date'))] = val
return pd.Series(data)
def get_series_latest_release(self, series_id):
"""
Get data for a Fred series id. This fetches the latest known data, and is equivalent to get_series()
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
Returns
-------
        data : Series
a Series where each index is the observation date and the value is the data for the Fred series
"""
return self.get_series(series_id)
def get_series_first_release(self, series_id):
"""
        Get first-release data for a Fred series id, ignoring any subsequent revisions to the data series. For instance,
        US GDP for Q1 2014 was first released as 17149.6 and later revised to 17101.3 and then 17016.0;
        this method returns only the initially released values.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
Returns
-------
data : Series
a Series where each index is the observation date and the value is the data for the Fred series
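        Examples
        --------
        Sketch usage, assuming fred is a configured instance of this class:
        >>> gdp_first = fred.get_series_first_release('GDP')
        >>> # one value per observation date, ignoring all later revisions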
"""
df = self.get_series_all_releases(series_id)
first_release = df.groupby('date').head(1)
data = first_release.set_index('date')['value']
return data
def get_series_as_of_date(self, series_id, as_of_date):
"""
        Get data for a Fred series id as known on a particular date. This includes every release and revision of the
        data series reported on or before as_of_date, and ignores any revision reported after as_of_date.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
as_of_date : datetime, or datetime-like str such as '10/25/2014'
Include data revisions on or before this date, and ignore revisions afterwards
Returns
-------
        data : DataFrame
            a DataFrame with columns 'date', 'realtime_start' and 'value', restricted to releases reported on or before as_of_date
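        Examples
        --------
        Sketch usage, assuming fred is a configured instance of this class:
        >>> gdp_2014 = fred.get_series_as_of_date('GDP', '6/1/2014')
        >>> # rows whose realtime_start falls after 2014-06-01 are excluded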
"""
as_of_date = pd.to_datetime(as_of_date)
df = self.get_series_all_releases(series_id)
data = df[df['realtime_start'] <= as_of_date]
return data
def get_series_all_releases(self, series_id):
"""
Get all data for a Fred series id including first releases and all revisions. This returns a DataFrame
with three columns: 'date', 'realtime_start', and 'value'. For instance, the US GDP for Q4 2013 was first released
to be 17102.5 on 2014-01-30, and then revised to 17080.7 on 2014-02-28, and then revised to 17089.6 on
2014-03-27. You will therefore get three rows with the same 'date' (observation date) of 2013-10-01 but three
different 'realtime_start' of 2014-01-30, 2014-02-28, and 2014-03-27 with corresponding 'value' of 17102.5, 17080.7
        and 17089.6, respectively.
Parameters
----------
series_id : str
Fred series id such as 'GDP'
Returns
-------
data : DataFrame
a DataFrame with columns 'date', 'realtime_start' and 'value' where 'date' is the observation period and 'realtime_start'
is when the corresponding value (either first release or revision) is reported.
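        Examples
        --------
        Sketch usage, assuming fred is a configured instance of this class:
        >>> gdp_all = fred.get_series_all_releases('GDP')
        >>> # one row per (observation date, release date) pair with columns 'date', 'realtime_start', 'value'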
"""
url = "%s/series/observations?series_id=%s&realtime_start=%s&realtime_end=%s" % (self.root_url,
series_id,
self.earliest_realtime_start,
self.latest_realtime_end)
root = self.__fetch_data(url)
if root is None:
raise ValueError('No data exists for series id: ' + series_id)
data = {}
i = 0
for child in root.getchildren():
val = child.get('value')
if val == self.nan_char:
val = float('NaN')
else:
val = float(val)
realtime_start = self._parse(child.get('realtime_start'))
# realtime_end = self._parse(child.get('realtime_end'))
date = self._parse(child.get('date'))
data[i] = {'realtime_start': realtime_start,
# 'realtime_end': realtime_end,
'date': date,
'value': val}
i += 1
data = pd.DataFrame(data).T
return data
def get_series_vintage_dates(self, series_id):
"""
Get a list of vintage dates for a series. Vintage dates are the dates in history when a
series' data values were revised or new data values were released.
Parameters
----------
series_id : str
Fred series id such as 'CPIAUCSL'
Returns
-------
dates : list
list of vintage dates
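        Examples
        --------
        Sketch usage, assuming fred is a configured instance of this class:
        >>> vintages = fred.get_series_vintage_dates('CPIAUCSL')
        >>> # a list of datetime objects, one per release or revision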
"""
url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id)
root = self.__fetch_data(url)
if root is None:
            raise ValueError('No vintage dates exist for series id: ' + series_id)
dates = []
for child in root.getchildren():
dates.append(self._parse(child.text))
return dates
def __do_series_search(self, url):
"""
helper function for making one HTTP request for data, and parsing the returned results into a DataFrame
"""
root = self.__fetch_data(url)
series_ids = []
data = {}
num_results_returned = 0 # number of results returned in this HTTP request
        num_results_total = int(root.get('count'))  # total number of results; this can exceed the number returned in a single request
for child in root.getchildren():
num_results_returned += 1
series_id = child.get('id')
series_ids.append(series_id)
data[series_id] = {"id": series_id}
fields = ["realtime_start", "realtime_end", "title", "observation_start", "observation_end",
"frequency", "frequency_short", "units", "units_short", "seasonal_adjustment",
"seasonal_adjustment_short", "last_updated", "popularity", "notes"]
for field in fields:
data[series_id][field] = child.get(field)
if num_results_returned > 0:
data = pd.DataFrame(data, columns=series_ids).T
# parse datetime columns
for field in ["realtime_start", "realtime_end", "observation_start", "observation_end", "last_updated"]:
data[field] = data[field].apply(self._parse, format=None)
# set index name
data.index.name = 'series id'
else:
data = None
return data, num_results_total
def __get_search_results(self, url, limit, order_by, sort_order, filter):
"""
        helper function for getting search results, up to the specified limit on the number of results. The Fred
        HTTP API returns at most 1000 results per request, so this may issue multiple HTTP requests to obtain all
        of the available results.
"""
order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated',
'observation_start', 'observation_end', 'popularity']
if order_by is not None:
if order_by in order_by_options:
url = url + '&order_by=' + order_by
else:
raise ValueError('%s is not in the valid list of order_by options: %s' % (order_by, str(order_by_options)))
if filter is not None:
if len(filter) == 2:
url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1])
else:
                raise ValueError('Filter should be a 2-item tuple like (filter_variable, filter_value)')
sort_order_options = ['asc', 'desc']
if sort_order is not None:
if sort_order in sort_order_options:
url = url + '&sort_order=' + sort_order
else:
raise ValueError('%s is not in the valid list of sort_order options: %s' % (sort_order, str(sort_order_options)))
data, num_results_total = self.__do_series_search(url)
if data is None:
return data
if limit == 0:
max_results_needed = num_results_total
else:
max_results_needed = limit
if max_results_needed > self.max_results_per_request:
for i in range(1, max_results_needed // self.max_results_per_request + 1):
offset = i * self.max_results_per_request
next_data, _ = self.__do_series_search(url + '&offset=' + str(offset))
data = data.append(next_data)
return data.head(max_results_needed)
def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None):
"""
Do a fulltext search for series in the Fred dataset. Returns information about matching series in a DataFrame.
Parameters
----------
text : str
text to do fulltext search on, e.g., 'Real GDP'
limit : int, optional
            limit the number of results to this value. If limit is 0, all matching results are fetched.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
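        Examples
        --------
        Sketch usage, assuming fred is a configured instance of this class (the filter value shown is illustrative):
        >>> popular = fred.search('Real GDP', order_by='popularity', sort_order='desc')
        >>> monthly = fred.search('unemployment', filter=('frequency', 'Monthly'))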
"""
url = "%s/series/search?search_text=%s&" % (self.root_url,
quote_plus(text))
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
return info
def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None):
"""
        Search for series that belong to a release id. Returns information about matching series in a DataFrame.
Parameters
----------
release_id : int
release id, e.g., 151
limit : int, optional
            limit the number of results to this value. If limit is 0, all matching results are fetched.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
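        Examples
        --------
        Sketch usage, assuming fred is a configured instance of this class:
        >>> release_series = fred.search_by_release(151, limit=10)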
"""
url = "%s/release/series?release_id=%d" % (self.root_url, release_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for release id: ' + str(release_id))
return info
def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None):
"""
        Search for series that belong to a category id. Returns information about matching series in a DataFrame.
Parameters
----------
category_id : int
category id, e.g., 32145
limit : int, optional
            limit the number of results to this value. If limit is 0, all matching results are fetched.
order_by : str, optional
order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency',
'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end',
'popularity'
sort_order : str, optional
sort the results by ascending or descending order. Valid options are 'asc' or 'desc'
filter : tuple, optional
filters the results. Expects a tuple like (filter_variable, filter_value).
Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment'
Returns
-------
info : DataFrame
a DataFrame containing information about the matching Fred series
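        Examples
        --------
        Sketch usage, assuming fred is a configured instance of this class:
        >>> category_series = fred.search_by_category(32145, order_by='popularity', sort_order='desc')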
"""
url = "%s/category/series?category_id=%d&" % (self.root_url,
category_id)
info = self.__get_search_results(url, limit, order_by, sort_order, filter)
if info is None:
raise ValueError('No series exists for category id: ' + str(category_id))
return info
| apache-2.0 |
rhlobo/bigtempo | bigtempo/processors/dataframe_task.py | 1 | 3801 | # -*- coding: utf-8 -*-
import pandas
import datetime
import bigtempo.utils as utils
_DEFAULT_FREQUENCY = 'B'
_KNOWN_FREQUENCIES = ['U', 'L', 'S', 'T', 'H', 'B', 'W', 'BMS', 'BM', 'MS', 'M', 'BQS', 'BQ', 'QS', 'Q', 'BAS', 'BA', 'AS', 'A']
_FREQUENCY_ENUMERATION_DICT = dict((y, x) for x, y in enumerate(_KNOWN_FREQUENCIES))
def factory(instance, registration, dependency_dict, *args, **kwargs):
return DataFrameDatasourceTask(instance, registration, dependency_dict)
class DataFrameDatasourceTask(object):
def __init__(self, instance, registration, dependency_dict):
self._instance = instance
self._dependency_dict = dependency_dict
self._registration = registration
def process(self, symbol, start=None, end=None):
context = self._create_context_for(symbol, start, end)
result = self._instance.evaluate(context, symbol, start, end)
return utils.slice(result, start, end)
def _create_context_for(self, symbol, start=None, end=None):
evaluated_dependencies = self._evaluate_datasource_dependencies(symbol, start, end)
return DatasourceContext(evaluated_dependencies)
def _evaluate_datasource_dependencies(self, symbol, start=None, end=None):
result = {}
new_start = None if not start else evaluate_loopback_period(self._registration,
self._dependency_dict.values(),
start)
for reference, dependency in self._dependency_dict.iteritems():
result[reference] = dependency.process(symbol, new_start, end)
return result
class DatasourceContext(object):
def __init__(self, dependencies):
self._dependencies = dependencies
def dependencies(self, reference=None):
return self._dependencies.get(reference) if reference else self._dependencies
def evaluate_loopback_period(datasource_registration, dependencies, date):
lookback = datasource_registration['lookback']
frequency = determine_frequency(datasource_registration.get('frequency'), dependencies)
    # Holiday workaround: pad business-day lookbacks by roughly 8% (plus one extra period below) so market holidays do not shrink the effective window
if frequency in ['B', 'C']:
lookback = 1 + int(lookback * 1.08)
lookback += 1
return relative_period(-lookback, frequency, date)
def determine_frequency(datasource_frequency=None, dependencies=None):
if datasource_frequency is not None:
return datasource_frequency
    if dependencies is None or len(dependencies) == 0:
return _DEFAULT_FREQUENCY
dependencies_frequencies = []
for dependency in dependencies:
dependency_frequency = dependency._registration.get('frequency')
dependency_dependencies = dependency._dependency_dict.values()
dependencies_frequencies.append(determine_frequency(dependency_frequency, dependency_dependencies))
return max(dependencies_frequencies, key=_frequency_sort_key)
def _frequency_sort_key(value):
frequency = value.split('-')[0]
if frequency not in _FREQUENCY_ENUMERATION_DICT:
return 0
return _FREQUENCY_ENUMERATION_DICT[frequency]
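# For illustration: the sort key orders the known frequencies from finest to coarsest,
# e.g. _frequency_sort_key('B') < _frequency_sort_key('M') < _frequency_sort_key('A'),
# so determine_frequency() resolves mixed dependency frequencies to the one with the
# largest key, i.e. the coarsest of the known frequencies.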
def relative_period(periods, frequency, date=None):
business_day = equivalent_business_day() if not date else equivalent_business_day(date)
return (pandas.Period(business_day, freq=frequency) + periods).to_timestamp()
def equivalent_business_day(date=None):
if not date:
date = datetime.datetime.today().replace(hour=0,
minute=0,
second=0,
microsecond=0)
isoweekday = date.isoweekday()
return date if isoweekday <= 5 else date - datetime.timedelta(isoweekday - 5)
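# For illustration: equivalent_business_day(datetime.datetime(2014, 9, 6)) falls on a
# Saturday and maps back to Friday 2014-09-05, while weekday dates are returned unchanged.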
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/indexes/period/test_construction.py | 6 | 19404 | import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.core.indexes.period as period
from pandas.compat import lrange, PY3, text_type, lmap
from pandas import (Period, PeriodIndex, period_range, offsets, date_range,
Series, Index)
class TestPeriodIndex(object):
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='D')]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.Index(np.array(arr), dtype=object))
def test_constructor_use_start_freq(self):
# GH #1118
p = Period('4/2/2012', freq='B')
index = PeriodIndex(start=p, periods=10)
expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq='2Q-DEC')
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
pytest.raises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
pytest.raises(ValueError, period_range, '2007-1-1', periods=500,
freq='X')
def test_constructor_nano(self):
idx = period_range(start=Period(ordinal=1, freq='N'),
end=Period(ordinal=4, freq='N'), freq='N')
exp = PeriodIndex([Period(ordinal=1, freq='N'),
Period(ordinal=2, freq='N'),
Period(ordinal=3, freq='N'),
Period(ordinal=4, freq='N')], freq='N')
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
pytest.raises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
pytest.raises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
pytest.raises(ValueError, PeriodIndex, start=start, end=end)
pytest.raises(ValueError, PeriodIndex, start=start)
pytest.raises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
pytest.raises(ValueError, PeriodIndex, idx._values)
pytest.raises(ValueError, PeriodIndex, list(idx._values))
pytest.raises(TypeError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq='M')
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
        assert result.freq == 'M'
result = PeriodIndex(idx, freq='2M')
tm.assert_index_equal(result, idx.asfreq('2M'))
        assert result.freq == '2M'
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq('2M'))
        assert result.freq == '2M'
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
pytest.raises(ValueError, PeriodIndex, vals, freq='D')
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(['2013-01', '2013-03'], dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-03'], freq='M')
tm.assert_index_equal(idx, exp)
assert idx.dtype == 'period[M]'
idx = PeriodIndex(['2013-01-05', '2013-03-05'], dtype='period[3D]')
exp = PeriodIndex(['2013-01-05', '2013-03-05'], freq='3D')
tm.assert_index_equal(idx, exp)
assert idx.dtype == 'period[3D]'
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(['2013-01-01', '2013-01-02'], freq='D')
res = PeriodIndex(idx, dtype='period[M]')
exp = PeriodIndex(['2013-01', '2013-01'], freq='M')
tm.assert_index_equal(res, exp)
assert res.dtype == 'period[M]'
res = PeriodIndex(idx, freq='M')
tm.assert_index_equal(res, exp)
assert res.dtype == 'period[M]'
msg = 'specified freq and dtype are different'
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex(['2011-01'], freq='M', dtype='period[D]')
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq='M')
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == 'M'
with tm.assert_raises_regex(ValueError, 'freq not specified'):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(np.array([pd.NaT, pd.NaT,
Period('2011-01', freq='M'),
Period('2011-01', freq='M')]))
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, '2011-01', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex([pd.NaT, pd.NaT])
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex(['NaT', 'NaT'])
with tm.assert_raises_regex(ValueError, 'freq not specified'):
PeriodIndex(np.array(['NaT', 'NaT']))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')])
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex(np.array([Period('2011-01', freq='M'), pd.NaT,
Period('2011-01', freq='D')]))
# first element is pd.NaT
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')])
with tm.assert_raises_regex(period.IncompatibleFrequency, msg):
PeriodIndex(np.array([pd.NaT, Period('2011-01', freq='M'),
Period('2011-01', freq='D')]))
def test_constructor_mixed(self):
idx = PeriodIndex(['2011-01', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['2011-01', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(['NaT', pd.NaT, Period('2011-01', freq='M')])
exp = PeriodIndex(['NaT', 'NaT', '2011-01'], freq='M')
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period('2011-01-01', freq='D'), pd.NaT,
'2012-01-01'])
exp = PeriodIndex(['2011-01-01', 'NaT', '2012-01-01'], freq='D')
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=2, freq='M')
result = idx._simple_new(idx, 'p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new([pd.Period('2007-01', freq='M'),
pd.Period('2007-02', freq='M')],
'p', freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(np.array([pd.Period('2007-01', freq='M'),
pd.Period('2007-02', freq='M')]),
'p', freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq='M', name='p')
result = idx._simple_new(idx, name='p', freq='M')
tm.assert_index_equal(result, idx)
def test_constructor_floats(self):
# GH13079
for floats in [[1.1, 2.1], np.array([1.1, 2.1])]:
with pytest.raises(TypeError):
pd.PeriodIndex._simple_new(floats, freq='M')
with pytest.raises(TypeError):
pd.PeriodIndex(floats, freq='M')
def test_constructor_nat(self):
pytest.raises(ValueError, period_range, start='NaT',
end='2011-01-01', freq='M')
pytest.raises(ValueError, period_range, start='2011-01-01',
end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_constructor_freq_mult(self):
# GH #7811
for func in [PeriodIndex, period_range]:
# must be the same, but for sure...
pidx = func(start='2014-01', freq='2M', periods=4)
expected = PeriodIndex(['2014-01', '2014-03',
'2014-05', '2014-07'], freq='2M')
tm.assert_index_equal(pidx, expected)
pidx = func(start='2014-01-02', end='2014-01-15', freq='3D')
expected = PeriodIndex(['2014-01-02', '2014-01-05',
'2014-01-08', '2014-01-11',
'2014-01-14'], freq='3D')
tm.assert_index_equal(pidx, expected)
pidx = func(end='2014-01-01 17:00', freq='4H', periods=3)
expected = PeriodIndex(['2014-01-01 09:00', '2014-01-01 13:00',
'2014-01-01 17:00'], freq='4H')
tm.assert_index_equal(pidx, expected)
msg = ('Frequency must be positive, because it'
' represents span: -1M')
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(['2011-01'], freq='-1M')
        msg = 'Frequency must be positive, because it represents span: 0M'
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(['2011-01'], freq='0M')
        msg = 'Frequency must be positive, because it represents span: 0M'
with tm.assert_raises_regex(ValueError, msg):
period_range('2011-01', periods=3, freq='0M')
def test_constructor_freq_mult_dti_compat(self):
import itertools
mults = [1, 2, 3, 4, 5]
freqs = ['A', 'M', 'D', 'T', 'S']
for mult, freq in itertools.product(mults, freqs):
freqstr = str(mult) + freq
pidx = PeriodIndex(start='2014-04-01', freq=freqstr, periods=10)
expected = date_range(start='2014-04-01', freq=freqstr,
periods=10).to_period(freqstr)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ['1D1H', '1H1D']:
pidx = PeriodIndex(['2016-01-01', '2016-01-02'], freq=freq)
expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 00:00'],
                                   freq='25H')
            tm.assert_index_equal(pidx, expected)
for freq, func in zip(['1D1H', '1H1D'], [PeriodIndex, period_range]):
pidx = func(start='2016-01-01', periods=2, freq=freq)
expected = PeriodIndex(['2016-01-01 00:00', '2016-01-02 01:00'],
freq='25H')
tm.assert_index_equal(pidx, expected)
def test_constructor(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert len(pi) == 9
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert len(pi) == 4 * 9
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert len(pi) == 12 * 9
pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert len(pi) == 365 * 9 + 2
pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert len(pi) == 261 * 9
pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert len(pi) == 365 * 24
pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert len(pi) == 24 * 60
pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert len(pi) == 24 * 60 * 60
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
pytest.raises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
pytest.raises(ValueError, PeriodIndex, vals)
def test_constructor_error(self):
start = Period('02-Apr-2005', 'B')
end_intv = Period('2006-12-31', ('w', 1))
msg = 'Start and end must have same freq'
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(start=start, end=end_intv)
msg = 'Must specify 2 of start, end, periods'
with tm.assert_raises_regex(ValueError, msg):
PeriodIndex(start=start)
def test_recreate_from_data(self):
for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']:
org = PeriodIndex(start='2001/04/01', freq=o, periods=1)
idx = PeriodIndex(org.values, freq=o)
tm.assert_index_equal(idx, org)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
types = str,
if PY3:
# unicode
types += text_type,
for t in types:
expected = Index(lmap(t, raw))
res = index.map(t)
# should return an Index
assert isinstance(res, Index)
# preserve element types
assert all(isinstance(resi, t) for resi in res)
# lastly, values should compare equal
tm.assert_index_equal(res, expected)
class TestSeriesPeriod(object):
def setup_method(self, method):
self.series = Series(period_range('2000-01-01', periods=10, freq='D'))
def test_constructor_cant_cast_period(self):
with pytest.raises(TypeError):
Series(period_range('2000-01-01', periods=10, freq='D'),
dtype=float)
def test_constructor_cast_object(self):
s = Series(period_range('1/1/2000', periods=10), dtype=object)
exp = Series(period_range('1/1/2000', periods=10))
tm.assert_series_equal(s, exp)
| mit |