repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---|
jamesonthecrow/geosim | geosim.py | 1 | 23826 | import numpy
import networkx
import matplotlib.pyplot as pyplot
import matplotlib
import random
matplotlib.rc('text', usetex=True)
# Default parameters
_DEFAULT_PARAMS = {
'rho': lambda: 0.1*numpy.random.randn() + 0.6,
'gamma': 0.21,
'alpha': lambda: 0.3, # Constant for all users
'beta': 0.8,
'tau': 17.0, # Cutoff on time between calls
'xmin': 1.0,
'network_type': 'sim',
'fname': None, # Used to load an empirical network
'nusers': 100,
'nlocations': 100,
'ncontacts': 30,
'nsteps': 100,
}
class GeoSim():
'''
An implementation of the GeoSim mobility model described by Toole et al.
The GeoSim model reproduces patterns of mobility within cities, taking
into account individual and social preferences. The model takes
a social network, either generated or empirical, as input and simulates
the mobility of agents. Because it is designed to model movements within
cities, agents may visit a number of discrete locations where the distance
between locations is assumed to have little effect on the choice of where
to go. Instead, choices are based on preferential return to previously
visited locations as well as the locations of social contacts. This model
is an extension of the Individual Mobility Model described by Song et al.
doi:10.1038/nphys1760
The model works as follows:
- A number of users are distributed randomly across N locations and
connected as part of a social network.
- Agents are assigned a waiting time at a given location after which
they will make a jump to another location.
- Agents choose their next destination based on either individual
preferences or social behavior.
- With probability rho*S^(-gamma) an agent explores and visits a
location they have not previously visited, where rho and gamma are
parameters and S is the number of unique locations they have visited
thus far.
- With probability (1-rho*S^(-gamma)) an agent returns to a location
they have visited previously.
- Given exploration, with probability alpha an agent chooses a new
location to visit based on preferential visits to a contact's locations.
The probability a contact is chosen is proportional to the cosine
similarity of the visit frequencies between the agent and each contact.
- With probability (1-alpha) an agent chooses a new location to visit
uniformly at random.
- Given a return to a previously visited location, with probability
alpha the agent again chooses a contact with probability proportional
to the cosine similarities. Given this choice, they preferentially
return to a location that this contact has visited and that the agent
has also visited before.
- With probability (1-alpha), the agent preferentially returns to a
location based only on their own past visit frequencies.
The cosine similarity is measured between the visitation frequencies of
two agents.
Choices made "preferentially" refer to cases where the probability of
choosing a location to visit is directly proportional to the frequency
with which that location has been visited in the past. For example, if an
agent preferentially chooses a location based on the visits of a social
contact, the probability of choosing location k is proportional to the
fraction of all visits at which that contact is observed at k.
When alpha=0, we recover the Individual Mobility Model of Song et al.
A number of parameters control the GeoSim model. In general, these
parameters are shared by all agents in the simulation, but some may be
provided as distributions. Specifically, rho and alpha may differ between
agents, representing heterogeneity in the social preferences
of different individuals. These parameters must be supplied as generating
functions that return a value for each agent when the model is initialized.
For example, to assign each agent the same value of alpha, a lambda
function can be passed: lambda: 0.3
To assign a parameter randomly from a normal distribution, a lambda function
of the following form can be used: lambda: numpy.random.randn()
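As a minimal usage sketch (the parameter values below are illustrative
assumptions, not recommendations), the model can be created, run, and its
mobility-similarity distribution plotted as follows:
model = GeoSim({'nusers': 100, 'nlocations': 100, 'nsteps': 24*30})
model.run()
ax = plot_similarity(model.graph)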
'''
def __init__(self, params=None):
'''Creates a new GeoSim model object.
Args:
params - dict, a dictionary of parameter values. If this is not
provided, defaults are set.
'''
self.graph = None
self.params = _DEFAULT_PARAMS
if params:
for param, value in params.iteritems():
self.params[param] = value
self.initialize_sim()
def initialize_sim(self, type='lognorm'):
'''Initializes the simulation.
Three types of network can be specified: a real network loaded from a
pickled networkx object given a filename, a small-world network created
using the small-world generator in networkx, or a random graph
with a lognormal degree distribution.
'''
# Initialize the network.
if self.params['network_type'] == 'real':
self.create_network_from_file(self.params['fname'])
elif self.params['network_type'] == 'smallworld':
self.create_smallworld_network()
else:
self.create_lognormal_network()
# Initialize the attributes of nodes and edges.
self.set_initial_attributes()
# Initialize the locations of users.
self.set_initial_locations()
# Compute initial similarity.
self.calculate_similarity()
# The model tracks the average growth of the number of unique locations,
# S, over the length of the simulation. We compute and store these
# values as time goes on, measuring S(t) at various points along
# the simulation.
max_time_pow = numpy.ceil(numpy.log10(self.params['nsteps']))
self.time = numpy.logspace(1, max_time_pow, 20)
self.St = numpy.zeros(len(self.time))
def set_initial_attributes(self):
'''Initialize the attributes of nodes in the social network.'''
for i in self.graph.nodes_iter():
# A location vector for a user tracks the number of visits made
# by a user to each location. It is initially all zeros as the
# user has yet to travel anywhere.
self.graph.node[i]['lvec'] = numpy.zeros(self.params['nlocations'],
dtype=int)
self.graph.node[i]['S'] = 0
# Set alpha and rho
self.graph.node[i]['alpha'] = self.params['alpha']()
self.graph.node[i]['rho'] = self.params['rho']()
def set_initial_locations(self):
'''Sets the initial location for each user.'''
# Each user is assigned to a location uniformly at random.
#a = numpy.arange(self.params['nlocations'], dtype=float)
#p = a / numpy.sum(a)
#p = numpy.cumsum(p)
for i in self.graph.nodes_iter():
#r = numpy.random.rand(self.graph.node[i]['S'])
#l = numpy.digitize(r, p)
l = numpy.random.randint(self.params['nlocations'])
self.graph.node[i]['lvec'][l] = 1
while self.graph.node[i]['S'] < 1:
l = numpy.random.randint(self.params['nlocations'])
if self.graph.node[i]['lvec'][l] == 0:
self.graph.node[i]['S'] += 1
self.graph.node[i]['lvec'][l] += 1
def create_network_from_file(self, filename):
'''Loads a network from a pickled networkx file.'''
self.graph = networkx.read_gpickle(filename)
self.params['nusers'] = self.graph.number_of_nodes()
def create_smallworld_network(self, prob=0.9):
'''Creates a small world network.'''
self.graph = networkx.newman_watts_strogatz_graph(
self.params['nusers'], self.params['ncontacts'], prob)
def create_lognormal_network(self):
'''Creates a network with a lognormal degree distribution.'''
self.graph = networkx.Graph()
nodes = numpy.arange(self.params['nusers'])
degs = numpy.random.lognormal(numpy.log(self.params['ncontacts']),
0.3, self.params['nusers'])
for i in nodes:
self.graph.add_node(i)
# connect the network
for i in nodes:
stubs_left = degs[i] - self.graph.degree(i)
if stubs_left > 0:
nbrs = []
while len(nbrs) < stubs_left:
tries = 0
j = nodes[numpy.random.randint(self.params['nusers'])]
while ((degs[j] - self.graph.degree(j) <= 0 or
i == j) and (tries < 1000)):
j = nodes[numpy.random.randint(self.params['nusers'])]
tries += 1
nbrs.append(j)
edges = [(i, j, {'sim': None}) for j in nbrs]
self.graph.add_edges_from(edges)
if i % (self.params['nusers'] / 10) == 0:
print i, self.params['nusers']
def run(self):
'''Runs the GeoSim Model.'''
print 'Running Model...'
t = 10 # time starts at 10 hours
tidx = numpy.digitize([t], self.time)[0]
# Vector to track the time that the users will make their next move.
nexttime = numpy.zeros(self.params['nusers']) + t
# For each hour of the simulation, do...
while t < self.params['nsteps']:
# Report every 30 days
if t%(30*24) == 0:
print 'Day %d' % int(t/24)
# Every ten days of simulation, recompute the similarity values.
if t%(24*10) == 0:
self.calculate_similarity()
# At certain points, compute the S(t) and store for analysis.
if t > self.time[tidx]:
self.calculate_st(tidx)
tidx+=1
# For each user...
for u in xrange(self.params['nusers']):
# If it is not time for the user to move, do not do anything.
if t < nexttime[u]:
continue
# If the user is moving, make choices based on the dynamics
# outlined in the GeoSim paper.
r = numpy.random.rand()
tries = 0
l = None
explore_probability = (self.graph.node[u]['rho'] *
self.graph.node[u]['S'] **
(-self.params['gamma']))
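# The probability of exploring a new location decays as rho * S^(-gamma),
# so agents who already know many locations explore less often (the
# saturation mechanism described by Song et al.).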
# Explore...
if r < explore_probability:
r = numpy.random.rand()
# Make an individual choice...
if r > self.graph.node[u]['alpha']:
# Choose a random location until one is found that
# has not been visited before.
while True:
l = self.get_random_location()
tries += 1
if (self.graph.node[u]['lvec'][l] == 0 or
tries > 100):
break
# Make a social choice...
else:
# Choose a location based on social contacts until one
# is found that has not been visited before.
while True:
l = self.get_social_location(u)
tries += 1
if (self.graph.node[u]['lvec'][l] == 0 or
tries > 100):
break
self.graph.node[u]['S'] += 1
# Return.
else:
r = numpy.random.rand()
# Make an individual choice...
if r > self.graph.node[u]['alpha']:
l = self.get_return_location(u)
# Make a social choice.
else:
# Choose locations based on the visits of social
# contacts until one is found that has been visited
# by the agent before.
while True:
l = self.get_social_location(u)
tries += 1
if (self.graph.node[u]['lvec'][l] != 0 or
tries > 100):
break
# If a location has not been chosen yet, assign a random one.
# This should not happen very often.
if not l:
l = self.get_random_location()
if self.graph.node[u]['lvec'][l] == 0:
self.graph.node[u]['S'] += 1
#nextlocs[u] = l
self.graph.node[u]['lvec'][l] += 1
nexttime[u] += self.get_next_time()
t += 1
# After the simulation has finished, compute the final cosine similarity
# and predictability of users.
self.calculate_similarity()
self.calculate_predictability()
def get_return_location(self, node_id):
'''
Choose a location by preferential return.
The probability an individual returns to a location is proportional
to the frequency with which they have visited that location.
Args:
node_id - the id of the node making the choice.
Returns:
location - the index of a location to visit.
'''
lvec = self.graph.node[node_id]['lvec']
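# Inverse-CDF sampling: normalize the cumulative visit counts and locate
# a uniform random draw within them.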
p = numpy.cumsum(lvec)/float(numpy.sum(lvec))
r = numpy.random.rand()
return numpy.digitize([r], p)[0]
def get_social_location(self, node_id):
'''
Choose a social contact and then choose one of their locations to visit.
A contact is chosen with probability proportional to the cosine
similarity between the visitation patterns of the two nodes.
Args:
node_id - the id of the node making the choice.
Returns:
location - the index of a location to visit.
'''
p = numpy.array([v['sim'] for v in self.graph[node_id].values()])
p = numpy.cumsum(p)/float(numpy.sum(p))
r = numpy.random.rand()
f = numpy.digitize([r], p)[0]
f = self.graph.neighbors(node_id)[f]
return self.get_return_location(f)
def get_random_location(self):
'''
Returns a location uniformly at random.
Returns:
location - the index of a location to visit.
'''
return numpy.random.randint(self.params['nlocations'])
def get_next_time(self):
'''
Generates a random waiting time for a user to remain at a location.
The distribution is based on the empirical measurements of Song
et al. and follows a power-law distribution with an exponential cutoff
at tau hours.
Returns:
waiting_time - The number of hours a user will remain at a location.
'''
return randomht(1, self.params['xmin'], 1+self.params['beta'],
1./self.params['tau'])[0]
def calculate_similarity(self):
'''
Calculates the mobility similarity between every two connected nodes.
The mobility similarity is defined as the cosine similarity of the
location vectors (visit frequencies) of two users.
The values are assigned to networkx graph edge attributes.
'''
print 'Calculating similarity.'
for i in self.graph.nodes_iter():
l1 = self.graph.node[i]['lvec']
for j in self.graph.neighbors(i):
l2 = self.graph.node[j]['lvec']
self.graph.edge[i][j]['sim'] = cosine_similarity(l1, l2)
def calculate_predictability(self):
'''
Calculates the predictability of each agent based on their contacts.
The predictability is computed as the ratio of the magnitude of a
predicted location vector, constructed as a linear combination of the
location vectors of the agent's contacts, to the magnitude of the
agent's own location vector.
'''
print 'Calculating predictability.'
for u in self.graph.nodes_iter():
uvec = self.graph.node[u]['lvec'].astype(float)
neighbors = self.graph.neighbors(u)
fvecs = numpy.array([self.graph.node[f]['lvec'] for f in neighbors])
fvecs = fvecs.astype(float)
q, r = numpy.linalg.qr(fvecs.T)
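# The columns of q form an orthonormal basis for the span of the contacts'
# location vectors; projecting the agent's vector onto this subspace gives
# the best prediction obtainable from a linear combination of the contacts'
# visit patterns.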
p = []
for f in xrange(numpy.shape(q)[1]):
p.append(numpy.dot(q[:, f], uvec)*q[:, f])
p = numpy.sum(numpy.array(p), 0)
p_norm = numpy.linalg.norm(p) / numpy.linalg.norm(uvec)
self.graph.node[u]['pred'] = p_norm
def calculate_st(self, tidx):
'''
Compute S(t) at a given time.
Args:
tidx - the index of the time bin at which to compute S(t)
'''
print 'Calculating S(t).'
s = numpy.sum(networkx.get_node_attributes(self.graph, 'S').values())
s = s / float(self.graph.number_of_nodes())
self.St[tidx] = s
def randomht(n, xmin, alpha, tau):
'''Returns a list of numbers from a power-law distribution with an
exponential cutoff.
Adapted from: http://tuvalu.santafe.edu/~aaronc/powerlaws/
Args:
n - the number of values to return
xmin - the minimum value to return
alpha - the exponent of the power law part of the distribution
tau - the exponent of the cutoff
Returns:
x - a list of random numbers generated from the distribution
'''
x = []
y=[]
for i in range(10*n):
y.append(xmin - (1./tau)*numpy.log(1.-numpy.random.rand()))
while True:
ytemp=[]
for i in range(10*n):
if numpy.random.rand()<pow(y[i]/float(xmin),-alpha):
ytemp.append(y[i])
y = ytemp
x = x+y
q = len(x)-n
if q==0:
break;
if (q>0):
r = range(len(x))
random.shuffle(r)
xtemp = []
for j in range(len(x)):
if j not in r[0:q]:
xtemp.append(x[j])
x=xtemp
break;
if (q<0):
y=[]
for j in range(10*n):
y.append(xmin - (1./tau)*numpy.log(1.-numpy.random.rand()))
return x
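# Example (illustrative values): with the module's default parameters the
# waiting-time exponent is 1 + beta = 1.8 and the cutoff is tau = 17 hours,
# so a batch of samples could be drawn as:
# samples = randomht(1000, 1.0, 1.8, 1.0/17.0)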
def cosine_similarity(u, v):
'''Computes the cosine similarity between two vectors
Args:
u - a numpy vector
v - a numpy vector
Returns:
cosine_similarity - float, the cosine similarity between u and v
'''
u = u.astype(float)
v = v.astype(float)
return numpy.dot(u, v)/(numpy.linalg.norm(u)*numpy.linalg.norm(v))
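# For example, cosine_similarity(numpy.array([1, 0, 1]), numpy.array([1, 1, 0]))
# returns 0.5: the dot product is 1 and both norms are sqrt(2).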
def plot_similarity(graph, legend=False, label='', color='k', ax=None,
axis='semilogy', xlim=[0,1], ylim=None, marker='o'):
'''Plots the distribution of mobility similarity values for a graph.
The user can control the style of the plot with optional arguments and can
add this distribution to a previously created axis by passing the handle
as a keyword argument.
Args:
graph - a networkx graph object whose edges have a 'sim' attribute
Returns:
ax - an axis object with the distribution added.
'''
# Create an axes if one is not provided.
if not ax:
figure = pyplot.figure()
ax = figure.add_subplot(1, 1, 1)
# Compute the distribution
data = numpy.array(networkx.get_edge_attributes(graph, 'sim').values())
xbins = numpy.linspace(0, 1, num=30)
x = xbins[1:] - (xbins[1:]-xbins[:-1])/2.
y, edges = numpy.histogram(data, xbins, density=True)
# Plot the distribution on the axis.
if axis == 'semilogy':
ax.semilogy(x, y, '-', color=color, mew=0, label=label, marker=marker)
elif axis == 'loglog':
ax.loglog(x, y, '-', color=color, mew=0, label=label, marker=marker)
else:
ax.plot(x, y, '-', color=color, mew=0, label=label, marker=marker)
if legend:
ax.legend(frameon=False, loc='best')
if xlim:
ax.set_xlim(xlim)
if ylim:
ax.set_ylim(ylim)
ax.set_ylabel(r'$p(cos\theta)$')
ax.set_xlabel(r'$cos\theta$')
return ax
def plot_predictability(graph, legend=False, label='', color='k', ax=None,
axis='semilogy', xlim=[0, 1], marker='o'):
'''Plots the distribution of predictability values for nodes in a graph.
The user can control the style of the plot with optional arguments and can
add this distribution to a previously created axis by passing the handle
as a keyword argument.
Args:
graph - a networkx graph object whose nodes have a 'pred' attribute
Returns:
ax - an axis object with the distribution added.
'''
# Create an axes if one is not provided.
if not ax:
figure = pyplot.figure()
ax = figure.add_subplot(1, 1, 1)
# Compute the distribution.
data = numpy.array(networkx.get_node_attributes(graph, 'pred').values())
xbins = numpy.linspace(0, 1, num=30)
x = xbins[1:] - (xbins[1:]-xbins[:-1])/2.
y, edges = numpy.histogram(data, xbins, density=True)
# Plot the distribution on the axis.
if axis == 'semilogy':
ax.semilogy(x, y, '-', color=color, mew=0, label=label, marker=marker)
elif axis == 'loglog':
ax.loglog(x, y, '-', color=color, mew=0, label=label, marker=marker)
else:
ax.plot(x, y, '-', color=color, mew=0, label=label, marker=marker)
if legend:
ax.legend(frameon=False, loc='best')
ax.set_xlim(xlim)
ax.set_xlabel(r"$\frac{|\hat{\mathbf{v}}|}{|\mathbf{v}|}$")
ax.set_ylabel(r"$p(\frac{|\hat{\mathbf{v}}|}{|\mathbf{v}|})$")
return ax
def plot_fk(graph, legend=False, label='', color='k', ax=None,
axis='loglog', xlim=None, ylim=None, marker='o'):
'''Plots the location visit frequencies, f_k, of nodes.
The user can control the style of the plot with optional arguments and can
add this distribution to a previously created axis by passing the handle
as a keyword argument.
Args:
graph - a networkx graph object whose nodes have a 'lvec' attribute
Returns:
ax - an axis object with the distribution added.
'''
# Create an axes if one is not provided.
if not ax:
figure = pyplot.figure()
ax = figure.add_subplot(1, 1, 1)
# Compute the visit frequencies.
data = numpy.array(networkx.get_node_attributes(graph, 'lvec').values())
data = data.astype(float) / numpy.sum(data, 1)[:,None]
data.sort(axis=1)
data = numpy.ma.masked_equal(data, 0)
freq = numpy.ma.mean(data,0)
# Plot
if axis == 'loglog':
ax.loglog(numpy.arange(1, len(freq)+1),
freq[::-1], '-', color=color, mew=0, label=label,
marker=marker)
else:
ax.plot(numpy.arange(1, len(freq)+1),
freq[::-1], '-', color=color, mew=0, label=label,
marker=marker)
if legend:
ax.legend(frameon=False, loc='best')
if xlim:
ax.set_xlim(xlim)
if ylim:
ax.set_ylim(ylim)
ax.set_xlabel(r'$k$')
ax.set_ylabel(r'$f_k$')
return ax
| mit |
roshantha9/AbstractManycoreSim | src/analyse_results/AnalyseResults_Exp_HRTVid_strmUtil_vs_sched_INDIN2015.py | 1 | 33885 | import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from collections import OrderedDict
import numpy as np
import traceback
import re
#import pylab
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
#plt.style.use('bmh_rosh')
#import seaborn as sns
import seaborn.apionly as sns
import scipy.stats
import json
from matplotlib import colors
import matplotlib.cm as cm
plt.style.use('bmh_rosh')
#from SimParams import SimParams
from util_scripts.resolution_combos import generate_resolution_combos
NOC_H = 3
NOC_W = 3
MAX_CC_LEVEL = 4000000
RANDOM_SEEDS = [80505, 1234, 81665, 33749, 43894, 26358, 70505, \
83660, 22817, 70263, 29917, 26044, \
76891, 50399, 64217, \
44117, 57824, 42267, 83200, 99108, \
95928, 53864, 44289, 77379, 80521, \
88117, 23327, 73337, 94064, 31982, 22250, \
6878, 66093, 69541, 18065, 74076, 98652, 21149, 42198, 5558]
# for testing
#RANDOM_SEEDS = [81665, 33749, 43894, 26358, 80505, \
#83660, 22817, 70263, 29917, 26044, \
#76891]
RANDOM_SEEDS=\
[
# batch 1
81665, 33749, 43894, 26358, 80505, 83660, 22817, 70263, 29917, 26044, 76891,
# batch 2
50399, 64217, 44117, 57824, 42267, 83200, 99108, 95928, 53864, 44289,
# batch 3
77379, 80521, 88117, 23327, 73337, 94064, 31982, 22250,
#batch 4
#6878, 66093, 69541, 18065, 74076, 98652, 21149, 42198, 5558, 70505, 1234
6878
]
# correct locations used for thesis draft
#FNAME_DATA_OUT_ADMINRATES = "../experiment_data/hrt_video/HRTVIDShort_data_260716/data_HRTVIDSHORT_Load_vs_VSInfo.js"
#FNAME_DATA_OUT_UTIL = "../experiment_data/hrt_video/HRTVIDShort_data_260716/data_HRTVIDSHORT_Load_vs_OverallSysUtil.js"
# location after bl correction
FNAME_DATA_OUT_ADMINRATES = "../experiment_data/hrt_video/HRTVIDShort_data_BLTest/data_HRTVIDSHORT_Load_vs_VSInfo.js"
FNAME_DATA_OUT_UTIL = "../experiment_data/hrt_video/HRTVIDShort_data_BLTest/data_HRTVIDSHORT_Load_vs_OverallSysUtil.js"
#GLOBAL_COLS = sns.color_palette("Reds", 3)[0:2] # proposed (always 1 more)
#GLOBAL_COLS.extend(sns.color_palette("Blues", 3)[0:2]) # baselines PP, BN (always 1 more)
#GLOBAL_COLS.extend(sns.color_palette("Greens", 3)[0:2]) # baselines LU, LM (always 1 more)
#GLOBAL_COLS = sns.color_palette("Paired", 6)
#GLOBAL_COLS.reverse()
GLOBAL_COLS = {
#'r', '#fb9a99', # reds
#'b', '#80b1d3', # blues
#'g', '#a6d854', # greens
"IPC":'r',
"LWCRS":'#fb9a99',
"PP":'#08306b',
"BN":'#2171b5',
"LU": '#6baed6',
"LM": '#c6dbef',
}
GLOBAL_LINESTYLES = {
"IPC" :'-',
"LWCRS":'-',
"PP":'--',
"BN":'-',
"LU": '--',
"LM": '-',
}
GLOBAL_LINEWIDTH = 2.5
print len(GLOBAL_COLS)
NOC_H = 3
NOC_W = 3
print len(RANDOM_SEEDS)
#sys.exit()
#RANDOM_SEEDS = [81665, 76891]
# correct locations used for thesis draft
#EXP_DATADIR = "../experiment_data/hrt_video/util_vs_sched/"
#EXP_DATADIR = "Z:/MCASim/experiment_data/hrt_video/util_vs_sched/seed_70505/"
#EXP_DATADIR = "Z:/MCASim/experiment_data/hrt_video/util_vs_sched_230415/" # for thesis draft
# location after bl correction
#EXP_DATADIR = "Z:/Simulator_versions_perExperiment/ThesisTechCh5_INDIN/src/experiment_data/hrt_video/util_vs_sched/"
EXP_DATADIR = "../experiment_data/hrt_video/util_vs_sched/"
global_types_of_tests = [
## Deterministic - AC ##
#{'ac':11, 'mp':0, 'pr':4, 'cmb':840 }, # determ, lumm
{'ac':11, 'mp':0, 'pr':4, 'cmb':841 , 'lbl': "IPC"}, # determ, improved
{'ac':11, 'mp':0, 'pr':4, 'cmb':842 , 'lbl': "LWCRS"}, # determ, improved
{'ac':11, 'mp':0, 'pr':4, 'cmb':833 , 'lbl': "PP"}, # determ, improved
{'ac':11, 'mp':0, 'pr':4, 'cmb':834 , 'lbl': "BN"}, # determ, improved
##{'ac':11, 'mp':0, 'pr':4, 'cmb':832 , 'lbl': "BN"}, # determ, improved - not used
{'ac':11, 'mp':10, 'pr':4, 'cmb':0 , 'lbl': "LU"}, # determ, improved
{'ac':11, 'mp':12, 'pr':4, 'cmb':0 , 'lbl': "LM"}, # determ, improved
]
global_admission_rate_eq_zero = {}
MP_ORDER = ["IPC", "LWCRS", "PP", "BN", "LU", "LM"]
#MP_ORDER = ["IPC", "LWCRS"]
def plot_Load_vs_VSInfo(normal_plot = True, bin_plot = False, load_data_from_file=False):
global global_admission_rate_eq_zero
## get data from experiment files
if(load_data_from_file==False):
# get res_combos
rand_seed = 1234
res_arr = [(720,576), (544,576), (528,576), (480,576), (426,240), (320,240), (240,180), (230,180)]
max_num_wfs = 10
res_combos = generate_resolution_combos(max_num_wfs-1,res_arr,rand_seed, sampled=True)
#print len(res_combos)
all_exp_results_listbased = OrderedDict()
colors = ['b', 'r', 'b', 'r']
markers = ['x', 'x', 'o', 'o']
for each_test_type in global_types_of_tests:
exp_key = "AC"+ str(each_test_type['ac']) + "_" + \
"MP"+ str(each_test_type['mp']) + "_" + \
"PR"+ str(each_test_type['pr']) + "_" + \
"CMB"+ str(each_test_type['cmb'])
exp_key = each_test_type['lbl']
all_exp_results_listbased[exp_key] = []
#print "exp_key = " + exp_key
count_temp = 0
for rix, each_res_combo in enumerate(res_combos):
print "rix =", rix
cc = each_res_combo['cc']
if cc > MAX_CC_LEVEL:
continue
#print cc
cc_uid = each_res_combo['cc_uid']
all_admission_ratio = []
for each_seed in RANDOM_SEEDS:
print "Get VSInfo : exp_key = " + str(exp_key) + ", cc = " + str(cc) + ", seed = " + str(each_seed)
try:
subdir = "seed_" + str(each_seed) + "/" +"ac"+str(each_test_type['ac'])+"mp"+str(each_test_type['mp'])+"pr"+str(each_test_type['pr'])+"cmb"+str(each_test_type['cmb'])+"/"
# get data for no-ac, LUM
prefix = "HRTVid_" + \
"AC" + str(each_test_type['ac']) + "_" + \
"MP" + str(each_test_type['mp']) + "_" + \
"PR" + str(each_test_type['pr']) + "_" + \
"CMB" + str(each_test_type['cmb']) + "_" + \
"cc" + str(cc) + "-" + str(cc_uid) + "_"
fname_prefix = EXP_DATADIR+subdir+prefix+str(NOC_H)+"_"+str(NOC_W)+"_"
## get video accept/reject/late list
fname = fname_prefix+"test__vsbs.js"
#fname = fname_prefix+"test__vsbs.js"
json_data=open(fname)
file_data = json.load(json_data)
num_vidstrm_accepted_but_late = file_data["num_vids_accepted_late"]
num_vidstrm_dropped_tasks = file_data["num_dropped_tasks"]
num_vidstrm_rejected = file_data["num_vids_rejected"]
num_vidstrm_accepted_success = file_data["num_vids_accepted_success"]
total_num_streams = num_vidstrm_accepted_but_late + num_vidstrm_rejected + num_vidstrm_accepted_success
# check late streams
if(num_vidstrm_accepted_but_late != 0):
print "--"
print num_vidstrm_accepted_but_late
print total_num_streams
print fname
print "--"
count_temp +=1
#sys.exit("error")
if(num_vidstrm_accepted_success==0 and num_vidstrm_accepted_but_late==1):
num_vidstrm_accepted_success = 1
admitted_ratio = float(float(num_vidstrm_accepted_success) / float(total_num_streams))
else:
admitted_ratio = float(float(num_vidstrm_accepted_success) / float(total_num_streams))
if(admitted_ratio == 0):
dkey = str(each_test_type['ac']) + "_" + \
str(each_test_type['mp']) + "_" + \
str(each_test_type['pr']) + "_" + \
str(each_test_type['cmb']) + "_" + \
str(each_seed) + "_" + \
str(cc) + "_" + \
str(cc_uid)
global_admission_rate_eq_zero[dkey] = {
"AC" : each_test_type['ac'],
"MP" : each_test_type['mp'],
"PR" : each_test_type['pr'],
"CMB" : each_test_type['cmb'],
'seed' : each_seed,
'fname' : fname,
'total_num_streams' : total_num_streams,
'cc' : str(cc),
'ccuid' : str(cc_uid),
'res_list' : each_res_combo['res_list']
}
# record metric
all_admission_ratio.append(admitted_ratio)
except Exception, e:
print "exception"
print e
tb = traceback.format_exc()
print tb
sys.exit()
entry = {
'cc' : cc,
'res_list': each_res_combo['res_list'],
'avg_cc': each_res_combo['avg_cc'],
#'utilisation_taskset' : utilisation_of_all_tasks,
#'num_vidstrm_accepted_but_late' : num_vidstrm_accepted_but_late,
#'num_vidstrm_dropped_tasks' : num_vidstrm_dropped_tasks,
#'num_vidstrm_rejected' : num_vidstrm_rejected,
#'num_vidstrm_accepted_success' : num_vidstrm_accepted_success,
#'num_total_streams' : total_num_streams,
'admitted_ratio_allseeds' : all_admission_ratio,
'mean_admitted_ratio_allseeds' : np.mean(all_admission_ratio)
}
all_exp_results_listbased[exp_key].append(entry)
print count_temp
#pprint.pprint(global_admission_rate_eq_zero)
# save results
_write_formatted_file(FNAME_DATA_OUT_ADMINRATES, all_exp_results_listbased, "json")
## get data from dump file
else:
fname = FNAME_DATA_OUT_ADMINRATES
json_data=open(fname)
file_data = json.load(json_data)
all_exp_results_listbased = file_data
if (bin_plot == True):
#######################################
### step plot with equal width bins ###
## now we plot
fig = plt.figure(figsize=(7*1.2, 5*1.2))
fig.canvas.set_window_title('plot_Util_vs_VSInfo')
ax = plt.subplot(111)
i = 0
legend_bars = []
scatter_colors = plt.get_cmap('jet')(np.linspace(0, 1.0, len(global_types_of_tests))) # used for INDIN
#scatter_colors = ["#FF0000", "#00FF00" , "#0000FF", "#FFA200", "#FF00F7", "#00FBFF"]
#scatter_colors = GLOBAL_COLS
scatter_markers = ["1", "2", "3", "4", "8", "D", "o", "x", "s", "*", "+"]
positions = OrderedDict()
binwidth = 50000
#crop_min = 500000
crop_min = 0
crop_max = 2230000
exp_metric = 'mean_admitted_ratio_allseeds'
#for k, each_exp in all_exp_results_listbased.iteritems():
for k in MP_ORDER:
each_exp = all_exp_results_listbased[k]
label = k
# sorted_results
sorted_results = sorted(each_exp, key=lambda k: k['cc'])
sorted_cc_list = [x['cc'] for x in sorted_results]
all_admitted_ratio = [x[exp_metric]*float(100.0) for x in sorted_results]
print sorted_cc_list
print len(set(sorted_cc_list))
x_data = sorted_cc_list
y_data = all_admitted_ratio
bins = np.arange(np.min(sorted_cc_list), np.max(sorted_cc_list) + binwidth, binwidth) # create equal width bins
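# Bin membership (positions) is computed once, for the first curve, and
# reused for the remaining mappers so every curve is averaged over
# identical workload bins.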
if(positions == {}):
for ix, each_bin in enumerate(bins):
if(ix<(len(bins)-1)):
temp_pos = [jx for jx, v in enumerate(x_data) if (v < bins[ix+1] and v > bins[ix])]
if temp_pos != []:
positions[each_bin] = temp_pos
y_chunks = []
for b, each_pos in positions.iteritems():
y_chunks.append([v for ix, v in enumerate(y_data) if ix in each_pos])
x = positions.keys()
y = [ np.mean(q) for q in y_chunks]
plt.step(x,y, color=GLOBAL_COLS[k], label=k, linestyle=GLOBAL_LINESTYLES[k], linewidth=GLOBAL_LINEWIDTH)
plt.hold(True)
x_data_ix_gt_q = [ix for (ix,val) in enumerate(x) if val>crop_min][0]
x_data_ix_lt_q = [ix for (ix,val) in enumerate(x) if val<crop_max][-1]
# cropped_data
x_data_cropped = x[x_data_ix_gt_q:x_data_ix_lt_q]
y_data_cropped = y[x_data_ix_gt_q:x_data_ix_lt_q]
y_data_cropped = [q if ~np.isnan(q) else 0.0 for q in y_data_cropped]
i+=1
#plt.grid(b=True, which='major', color='k', linestyle='-', alpha=0.5)
#plt.grid(b=True, which='minor', color='k', linestyle='--', alpha=0.5)
#plt.grid(b=True, which='both')
plt.minorticks_on()
#plt.yscale('log')
#plt.xscale('log')
leg = plt.legend(fontsize=14)
leg.draggable(True)
ax.tick_params(axis = 'both', which = 'both', labelsize=14)
ax.set_ylabel('Mean admission rate (%)')
ax.set_xlabel('Workload bins (bin_width=%d)' % binwidth)
ax.xaxis.major.formatter._useMathText = True
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0), labelsize=20)
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
plt.rc('font', **{'size':'16'})
plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.09)
def plot_Load_vs_OverallSysUtil(normal_plot = True, bin_plot = False, load_data_from_file=False):
## get data from experiment files
if(load_data_from_file==False):
# get res_combos
rand_seed = 1234
res_arr = [(720,576), (544,576), (528,576), (480,576), (426,240), (320,240), (240,180), (230,180)]
max_num_wfs = 10
res_combos = generate_resolution_combos(max_num_wfs-1,res_arr,rand_seed,sampled=True)
all_exp_results_listbased = OrderedDict()
colors = ['b', 'r', 'b', 'r']
markers = ['x', 'x', 'o', 'o']
for each_test_type in global_types_of_tests:
exp_key = "AC"+ str(each_test_type['ac']) + "_" + \
"MP"+ str(each_test_type['mp']) + "_" + \
"PR"+ str(each_test_type['pr']) + "_" + \
"CMB"+ str(each_test_type['cmb'])
exp_key = each_test_type['lbl']
all_exp_results_listbased[exp_key] = []
for each_res_combo in res_combos:
cc = each_res_combo['cc']
if cc > MAX_CC_LEVEL:
continue
cc_uid = each_res_combo['cc_uid']
all_util_ratio = []
for each_seed in RANDOM_SEEDS:
print "Get Util : exp_key = " + str(exp_key) + ", cc = " + str(cc) + ", seed = " + str(each_seed)
try:
subdir = "seed_" + str(each_seed) + "/" +"ac"+str(each_test_type['ac'])+"mp"+str(each_test_type['mp'])+"pr"+str(each_test_type['pr'])+"cmb"+str(each_test_type['cmb'])+"/"
# get data for no-ac, LUM
prefix = "HRTVid_" + \
"AC" + str(each_test_type['ac']) + "_" + \
"MP" + str(each_test_type['mp']) + "_" + \
"PR" + str(each_test_type['pr']) + "_" + \
"CMB" + str(each_test_type['cmb']) + "_" + \
"cc" + str(cc) + "-" + str(cc_uid) + "_"
fname_prefix = EXP_DATADIR+subdir+prefix+str(NOC_H)+"_"+str(NOC_W)+"_"
## get utilisation value
fname = fname_prefix+"test__utilshort.js"
json_data=open(fname)
file_data = json.load(json_data)
if (_is_zero_admin_rate(
each_test_type['ac'],
each_test_type['mp'],
each_test_type['pr'],
each_test_type['cmb'],
cc,
cc_uid) == True):
#simulation_time = file_data['node_idle_time'][-1]['time']
simulation_time = file_data['node_idle_time']['time']
all_nodes_idle_times = []
overall_system_busy_percentage = 0.0
else:
# idle time counter
#all_nodes_idle_times = file_data['node_idle_time'][-1]['it_c']
#simulation_time = file_data['node_idle_time'][-1]['time']
all_nodes_idle_times = file_data['node_idle_time']['it_c']
simulation_time = file_data['node_idle_time']['time']
overall_system_busy_percentage = (1.0-float(float(np.mean(all_nodes_idle_times))/float(simulation_time)))*100.0
all_util_ratio.append(overall_system_busy_percentage)
except Exception, e:
#print e
tb = traceback.format_exc()
print tb
entry = {
'cc' : cc,
'res_list': each_res_combo['res_list'],
#'all_nodes_idle_times' : all_nodes_idle_times,
#'simulation_time' : simulation_time,
#'overall_system_busy_percentage' : overall_system_busy_percentage,
'all_util_ratio_all_seeds' : all_util_ratio,
'mean_all_util_ratio_allseeds' : np.mean(all_util_ratio)
}
all_exp_results_listbased[exp_key].append(entry)
# save results
_write_formatted_file(FNAME_DATA_OUT_UTIL, all_exp_results_listbased, "json")
## get data from dump file
else:
fname = FNAME_DATA_OUT_UTIL
json_data=open(fname)
file_data = json.load(json_data)
all_exp_results_listbased = file_data
if(bin_plot == True):
####################
### bin plot ###
## now we plot
fig = plt.figure(figsize=(7*1.2, 5*1.2))
#fig.canvas.set_window_title('plot_Load_vs_OverallSysUtil')
ax = plt.subplot(111)
i = 0
legend_bars = []
positions = OrderedDict()
scatter_colors = plt.get_cmap('jet')(np.linspace(0, 1.0, len(global_types_of_tests)))
scatter_markers = ["1", "2", "3", "4", "8", "D", "o", "x", "s", "*", "+"]
exp_metric = 'mean_all_util_ratio_allseeds'
for k in MP_ORDER:
each_exp = all_exp_results_listbased[k]
label = k
# sorted_results
sorted_results = sorted(each_exp, key=lambda k: k['cc'])
sorted_cc_list = [x['cc'] for x in sorted_results]
sorted_overall_system_busy_percentage = [x[exp_metric] for x in sorted_results]
x_data = sorted_cc_list
y_data = sorted_overall_system_busy_percentage
binwidth = 50000
bins = np.arange(np.min(sorted_cc_list), np.max(sorted_cc_list) + binwidth, binwidth)
if(positions == {}):
for ix, each_bin in enumerate(bins):
if(ix<(len(bins)-1)):
temp_pos = [jx for jx, v in enumerate(x_data) if v < bins[ix+1] and v > bins[ix]]
if temp_pos != []:
positions[each_bin] = temp_pos
y_chunks = []
for b, each_pos in positions.iteritems():
y_chunks.append([v for ix, v in enumerate(y_data) if ix in each_pos])
x = positions.keys()
y = [ np.mean(q) for q in y_chunks]
plt.step(x,y, color=GLOBAL_COLS[k], label=k, linestyle=GLOBAL_LINESTYLES[k], linewidth=GLOBAL_LINEWIDTH)
plt.hold(True)
crop_min = 500000
crop_max = 2230000
x_data_ix_gt_q = [ix for (ix,val) in enumerate(x) if val>crop_min][0]
x_data_ix_lt_q = [ix for (ix,val) in enumerate(x) if val<crop_max][-1]
# cropped_data
x_data_cropped = x[x_data_ix_gt_q:x_data_ix_lt_q]
y_data_cropped = y[x_data_ix_gt_q:x_data_ix_lt_q]
y_data_cropped = [q if ~np.isnan(q) else 0.0 for q in y_data_cropped]
i+=1
#plt.grid(b=True, which='major', color='k', linestyle='-', alpha=0.5)
#plt.grid(b=True, which='minor', color='k', linestyle='--', alpha=0.5)
plt.minorticks_on()
#plt.yscale('log')
#plt.xscale('log')
leg = plt.legend(fontsize=14)
leg.draggable(True)
ax.tick_params(axis = 'both', which = 'both', labelsize=14)
ax.set_ylabel('Mean PE busy time (%)')
ax.set_xlabel('Workload bins (bin_width=%d)' % binwidth)
ax.xaxis.major.formatter._useMathText = True
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
plt.tick_params(axis='both', which='major', labelsize=14)
plt.tick_params(axis='both', which='minor', labelsize=14)
plt.rc('font', **{'size':'16'})
plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.09)
def _write_formatted_file(fname, data, format):
if(format == "pretty"):
logfile=open(fname, 'w')
pprint(data, logfile, width=128)
elif(format == "json"):
logfile=open(fname, 'w')
json_data = json.dumps(data)
logfile.write(json_data)
else:
logfile=open(fname, 'w')
pprint(data, logfile, width=128)
def _is_zero_admin_rate(ac,mp, pr, cmb, cc, ccuid):
dkey = str(ac) + "_" + \
str(mp) + "_" + \
str(pr) + "_" + \
str(cmb) + "_" + \
str(cc) + "_" + \
str(ccuid)
if dkey in global_admission_rate_eq_zero:
return True
else:
return False
def plot_Util_vs_Schedulability():
# get res_combos
rand_seed = 1234
res_arr = [(720,576), (544,576), (528,576), (480,576), (426,240), (320,240), (240,180)]
max_num_wfs = 7
res_combos = generate_resolution_combos(max_num_wfs-1,res_arr,rand_seed)
all_exp_results_listbased = {}
colors = ['b', 'r']
for each_test_type in global_types_of_tests:
exp_key = "AC"+ str(each_test_type['ac']) + "_" + \
"MP"+ str(each_test_type['mp']) + "_" + \
"PR"+ str(each_test_type['pr']) + "_" + \
"CMB"+ str(each_test_type['cmb'])
all_exp_results_listbased[exp_key] = []
utilisation_list = []
schedulablability_actual = []
schedulablability_analytical = []
for each_res_combo in res_combos:
try:
cc = each_res_combo['cc']
subdir = "ac"+str(each_test_type['ac'])+"mp"+str(each_test_type['mp'])+"pr"+str(each_test_type['pr'])+"cmb"+str(each_test_type['cmb'])+"/"
# get data for no-ac, LUM
prefix = "HRTVid_" + \
"AC"+ str(each_test_type['ac']) + "_" + \
"MP"+ str(each_test_type['mp']) + "_" + \
"PR"+ str(each_test_type['pr']) + "_" + \
"CMB"+ str(each_test_type['cmb']) + "_" +\
"cc"+ str(cc) + "_"
fname_prefix = EXP_DATADIR+subdir+prefix+str(NOC_H)+"_"+str(NOC_W)+"_"
#print fname_prefix
# ACTUAL : how many gops were schedulable ?
fname = fname_prefix+"test__gopsopbuffsumm.js"
json_data=open(fname)
file_data = json.load(json_data)
total_num_gops = len(file_data)
count_late_gops = 0
for k, each_gop in file_data.iteritems():
if(each_gop['gop_execution_lateness'] > 0):
count_late_gops+=1
actual_schedulable_gop_percentage = 1.0- float(float(count_late_gops)/float(total_num_gops))
schedulablability_actual.append(actual_schedulable_gop_percentage)
# ANALYTICAL : how many gops were analytically deemed schedulable ?
fname = fname_prefix+"test__utilvsschedresults.js"
json_data=open(fname)
file_data = json.load(json_data)
# we only look at the last entry, because that has all the vids that entered
#pprint.pprint(file_data)
utilisation_of_all_tasks = file_data[-1]['all_task_util'][1]
utilisation_list.append(utilisation_of_all_tasks)
num_gops_late = 0
total_num_gops = 0
for each_vid in file_data[-1]['vid_streams_wcrt_info']:
strm_key = each_vid[0]
strm_res = each_vid[1]
strm_wcet = each_vid[2]
strm_num_gops = each_vid[3]
if(strm_wcet > 0.48):
num_gops_late += strm_num_gops
total_num_gops += strm_num_gops
analytically_schedulable_gop_percentage = 1.0 - float(float(num_gops_late)/float(total_num_gops))
schedulablability_analytical.append(analytically_schedulable_gop_percentage)
entry = {
'cc' : cc,
'res_list': each_res_combo['res_list'],
'avg_cc': each_res_combo['avg_cc'],
'utilisation_taskset' : utilisation_of_all_tasks,
'actual_schedulable_gop_percentage' : actual_schedulable_gop_percentage,
'analytically_schedulable_gop_percentage' : analytically_schedulable_gop_percentage
}
all_exp_results_listbased[exp_key].append(entry)
except Exception, e:
print e
## now we plot
fig = plt.figure()
fig.canvas.set_window_title('plot_Util_vs_Schedulability')
ax = plt.subplot(111)
i = 0
for k, each_exp in all_exp_results_listbased.iteritems():
label = k
# sorted_results
sorted_results = sorted(each_exp, key=lambda k: k['utilisation_taskset'])
#sorted_results = sorted(each_exp, key=lambda k: k['cc'])
sorted_cc_list = [x['cc'] for x in sorted_results]
sorted_util_list = [x['utilisation_taskset'] for x in sorted_results]
sorted_avgcc_list = [x['avg_cc'] for x in sorted_results]
sorted_shed_actual = [x['actual_schedulable_gop_percentage'] for x in sorted_results]
sorted_shed_analytical = [x['analytically_schedulable_gop_percentage'] for x in sorted_results]
x_data = sorted_cc_list
y_data = sorted_shed_analytical
plt.scatter(x_data, y_data, color=colors[i], marker='x', alpha=0.5)
plt.hold(True)
# regression
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x_data,y_data)
line = slope*np.array(x_data)+intercept
plt.plot(x_data,line, marker='', linestyle='--', linewidth=2,color=colors[i])
#p, residuals, rank, singular_values, rcond = np.polyfit(x_data, y_data, 5, full=True)
#fit_fn = np.poly1d(p)
#xtick = np.linspace(0, np.max(x_data), num=len(x_data))
#plt.plot(xtick,fit_fn(x_data),color=colors[i], marker='', linestyle='--', linewidth=2, label='Polynomial Regression fit')
#plt.scatter(sorted_util_list, sorted_shed_analytical, color='r', marker='x')
#plt.plot(sorted_util_list, yEXP, color='r', linestyle='--')
#plt.plot(sorted_util_list, sorted_shed_analytical, color='r', marker='x', linestyle='')
i+=1
plt.grid(True)
def func_fit_data(x, a, b, c):
return a * np.exp(-b * x) + c
###################################
# HELPERS
###################################
###################################
# MAIN
###################################
#plot_Util_vs_Schedulability()
plot_Load_vs_VSInfo(normal_plot=False, bin_plot=True, load_data_from_file=True)
plot_Load_vs_OverallSysUtil(normal_plot=False, bin_plot=True, load_data_from_file=True)
print "finished"
plt.show()
# class LogFormatterTeXExponent(pylab.LogFormatter, object):
# """Extends pylab.LogFormatter to use
# tex notation for tick labels."""
#
# def __init__(self, *args, **kwargs):
# super(LogFormatterTeXExponent,
# self).__init__(*args, **kwargs)
#
# def __call__(self, *args, **kwargs):
# """Wrap call to parent class with
# change to tex notation."""
# label = super(LogFormatterTeXExponent,
# self).__call__(*args, **kwargs)
# label = re.sub(r'e(\S)0?(\d+)',
# r'\\times 10^{\1\2}',
# str(label))
# label = "$" + label + "$"
# return label
| gpl-3.0 |
rahuldhote/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same across tasks. The multi-task
lasso imposes that features selected at one time point are selected
for all time points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
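# For comparison, fit one independent Lasso per task (each column of Y)
# and a single MultiTaskLasso fit jointly on all tasks.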
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
Obus/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 162 | 7103 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
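# Normalized cut: the weight crossing the bicluster boundary (bicluster rows
# to outside columns plus outside rows to bicluster columns) divided by the
# weight inside the bicluster; lower values indicate better biclusters.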
weight = X[rows[:, np.newaxis], cols].sum()
cut = (X[row_complement[:, np.newaxis], cols].sum() +
X[rows[:, np.newaxis], col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
gef756/scipy | tools/refguide_check.py | 29 | 23595 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check whether the objects in a Scipy submodule's __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser, REMAINDER
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'special',
'stats',
'stats.mstats',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
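# Calling f with a bogus keyword argument lets any deprecation wrapper emit
# its DeprecationWarning before the call fails; with warnings promoted to
# errors, catching that warning flags the callable as deprecated.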
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq',
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
skip_types = (dict, str, unicode, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
def check_doctests(module, verbose, dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
# the namespace to run examples in
ns = {'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
# if MPL is available, use display-less backend
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim'}
def __init__(self, parse_namedtuples=True, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(ns))
a_got = eval(got, dict(ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
            regex = (r'[\w\d_]+\(' +
                     ', '.join([r'[\w\d_]+=(.+)']*num) +
                     r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
            # heterogeneous tuple, e.g. (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except TypeError:
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
# Loop over non-deprecated items
results = []
all_success = True
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
all_success = False
if have_matplotlib:
plt.close('all')
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
return results
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=list(PUBLIC_SUBMODULES),
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
args = parser.parse_args(argv)
modules = []
names_dict = {}
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
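    # If a submodule's functions are documented on another module's refguide
    # page (per OTHER_MODULE_DOCS), make sure that other module is checked too.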
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
pdamodaran/yellowbrick | tests/test_model_selection/test_validation_curve.py | 1 | 5367 | # tests.test_model_selection.test_validation_curve
# Tests for the ValidationCurve visualizer
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Sat Mar 31 06:25:05 2018 -0400
#
# ID: test_validation_curve.py [] [email protected] $
"""
Tests for the ValidationCurve visualizer
"""
##########################################################################
## Imports
##########################################################################
import sys
import pytest
import numpy as np
from unittest.mock import patch
from tests.base import VisualTestCase
from tests.dataset import DatasetMixin
from sklearn.svm import SVC
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import ShuffleSplit, StratifiedKFold
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.model_selection.validation_curve import *
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Test Cases
##########################################################################
@pytest.mark.usefixtures("classification", "regression", "clusters")
class TestValidationCurve(VisualTestCase, DatasetMixin):
"""
Test the ValidationCurve visualizer
"""
@patch.object(ValidationCurve, 'draw')
def test_fit(self, mock_draw):
"""
Assert that fit returns self and creates expected properties
"""
X, y = self.classification
params = (
"train_scores_", "train_scores_mean_", "train_scores_std_",
"test_scores_", "test_scores_mean_", "test_scores_std_"
)
oz = ValidationCurve(
SVC(), param_name="gamma", param_range=np.logspace(-6, -1, 5)
)
for param in params:
assert not hasattr(oz, param)
assert oz.fit(X, y) is oz
mock_draw.assert_called_once()
for param in params:
assert hasattr(oz, param)
@pytest.mark.xfail(
sys.platform == 'win32', reason="images not close on windows"
)
def test_classifier(self):
"""
Test image closeness on a classification dataset with kNN
"""
X, y = self.classification
cv = ShuffleSplit(3, random_state=288)
param_range = np.arange(3, 10)
oz = ValidationCurve(
KNeighborsClassifier(), param_name="n_neighbors",
param_range=param_range, cv=cv, scoring='f1_weighted',
)
oz.fit(X, y)
oz.poof()
self.assert_images_similar(oz)
def test_regression(self):
"""
Test image closeness on a regression dataset with a DecisionTree
"""
X, y = self.regression
cv = ShuffleSplit(3, random_state=938)
param_range = np.arange(3, 10)
oz = ValidationCurve(
DecisionTreeRegressor(random_state=23), param_name="max_depth",
param_range=param_range, cv=cv, scoring='r2',
)
oz.fit(X, y)
oz.poof()
self.assert_images_similar(oz, tol=12.0)
@pytest.mark.xfail(
sys.platform == 'win32', reason="images not close on windows"
)
def test_quick_method(self):
"""
Test validation curve quick method with image closeness on SVC
"""
X, y = self.classification
pr = np.logspace(-6, -1, 3)
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=321)
ax = validation_curve(
SVC(), X, y, logx=True, param_name='gamma', param_range=pr, cv=cv
)
self.assert_images_similar(ax=ax)
@pytest.mark.xfail(
sys.platform == 'win32', reason="images not close on windows"
)
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_integration(self):
"""
Test on mushroom dataset with pandas DataFrame and Series and NB
"""
df = self.load_pandas("mushroom")
target = "target"
features = [col for col in df.columns if col != target]
X = pd.get_dummies(df[features])
y = df[target]
assert isinstance(X, pd.DataFrame)
assert isinstance(y, pd.Series)
cv = StratifiedKFold(n_splits=2, random_state=11)
pr = np.linspace(0.1, 3.0, 6)
oz = ValidationCurve(
BernoulliNB(), cv=cv, param_range=pr, param_name='alpha'
)
oz.fit(X, y)
oz.poof()
self.assert_images_similar(oz)
@patch.object(ValidationCurve, 'draw')
def test_reshape_scores(self, mock_draw):
"""
Test supplying an alternate CV methodology and train_sizes
"""
X, y = self.classification
pr = np.logspace(-6, -1, 3)
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=14)
oz = ValidationCurve(SVC(), param_name='gamma', param_range=pr, cv=cv)
oz.fit(X, y)
assert oz.train_scores_.shape == (3, 5)
assert oz.test_scores_.shape == (3, 5)
def test_bad_train_sizes(self):
"""
Test learning curve with bad input for training size.
"""
with pytest.raises(YellowbrickValueError):
ValidationCurve(SVC(), param_name='gamma', param_range=100)
| apache-2.0 |
lwcook/horsetail-matching | examples/full_example.py | 1 | 2625 | import numpy as np
import scipy.optimize as scopt
import matplotlib.pyplot as plt
from horsetailmatching import HorsetailMatching, UniformParameter
from horsetailmatching import GaussianParameter, IntervalParameter
from horsetailmatching.demoproblems import TP2
from horsetailmatching.surrogates import PolySurrogate
def main():
u_1 = UniformParameter(lower_bound=-1, upper_bound=1)
u_2 = IntervalParameter(lower_bound=-1, upper_bound=1)
def fQOI(x, u):
return TP2(x, u, jac=True)
def ftarget_u(h):
return 0 - h**5
def ftarget_l(h):
return -1 - h**5
qPolyChaos = PolySurrogate(dimensions=2, order=3,
poly_type=['legendre', 'hermite'])
gradPolyChaos = [PolySurrogate(dimensions=2, order=3,
poly_type=['legendre', 'hermite']),
PolySurrogate(dimensions=2, order=3,
poly_type=['legendre', 'hermite'])]
u_quad_points = qPolyChaos.getQuadraturePoints()
def mySurrogateWithGrad(u_quad, q_quad, grad_quad):
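        # Train one polynomial-chaos surrogate on the QOI samples at the
        # quadrature points and one per component of the gradient, then return
        # callables that evaluate each of them at an arbitrary u.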
qPolyChaos.train(q_quad)
for i, gPC in enumerate(gradPolyChaos):
gPC.train(grad_quad[:, i])
def qmodel(u):
return qPolyChaos.predict(u)
def gradmodel(u):
return [gPC.predict(u) for gPC in gradPolyChaos]
return qmodel, gradmodel
theHM = HorsetailMatching(fQOI, [u_1, u_2], jac=True,
ftarget=(ftarget_u, ftarget_l),
samples_prob=1000, samples_int=25,
integration_points=np.linspace(-10, 25, 500),
surrogate=mySurrogateWithGrad, surrogate_jac=True,
surrogate_points=u_quad_points,
kernel_type='uniform', verbose=True)
theHM.evalMetric([1, 1])
upper, lower, CDFs = theHM.getHorsetail()
for CDF in CDFs:
plt.plot(CDF[0], CDF[1], 'grey', lw=0.5)
plt.plot(upper[0], upper[1], 'b', label='initial')
plt.plot(lower[0], lower[1], 'b')
def myObj(x):
q, grad = theHM.evalMetric(x)
# theHM.plotHorsetail()
# plt.show()
return q, grad
solution = scopt.minimize(myObj, x0=[1, 1], jac=True, method='SLSQP',
constraints=[{'type': 'ineq', 'fun': lambda x: x[0]},
{'type': 'ineq', 'fun': lambda x: x[1]}])
    print(solution)
upper, lower, CDFs = theHM.getHorsetail()
for CDF in CDFs:
plt.plot(CDF[0], CDF[1], 'grey', lw=0.5)
plt.plot(upper[0], upper[1], 'r', label='optimal')
plt.plot(lower[0], lower[1], 'r')
plt.legend(loc='lower right')
plt.show()
if __name__ == "__main__":
main()
| mit |
davidgardenier/frbpoppy | tests/dm_snr/frbpoppy_chime.py | 1 | 6300 | """Determine whether frbpoppy can explain CHIME results."""
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredText
import numpy as np
from scipy.stats import ks_2samp
from frbcat import ChimeRepeaters
from frbpoppy import Frbcat, split_pop, unpickle, hist
from tests.convenience import plot_aa_style, rel_path
SNR_LIMIT_ONE_OFFS = 10
SNR_LIMIT_REPS = 10
def get_frbcat_data():
"""Get all chime data from frbcat.
Returns:
dict: Two keys 'r' for repeater and 'o' for one-offs. Each
with entries for 'dm' and 'snr'
"""
fc = Frbcat(frbpoppy=False, repeaters=True, update=False)
chime_df = fc.df[fc.df.telescope == 'chime']
chime_df = chime_df.sort_values(by=['frb_name'])
frbcat = {'r': {}, 'o': {}}
# Chime one-offs
chime_o = chime_df.drop_duplicates(subset=['frb_name'], keep=False)
chime_o = chime_o[(chime_o.snr > SNR_LIMIT_ONE_OFFS)]
# Chime repeaters
chime_r = chime_df.loc[chime_df['frb_name'].duplicated(), :]
# Actually use the Chime repeaters database
chime_r = ChimeRepeaters().df.sort_values(by='name')
chime_r = chime_r[(chime_r.snr > SNR_LIMIT_REPS)]
# One DM value per repeater (used the average between bursts)
frbcat['r']['dm'] = chime_r.groupby('name').mean().reset_index().dm
frbcat['o']['dm'] = chime_o.dm
# All the different SNRs per repeater (or one_offs)
r_snr = chime_r.sort_values('timestamp').groupby('name').snr.first().values
frbcat['r']['snr'] = r_snr
frbcat['o']['snr'] = chime_o.snr
# Number of repeaters vs one offs
frbcat['r']['n'] = len(frbcat['r']['dm'])
frbcat['o']['n'] = len(frbcat['o']['dm'])
return frbcat
def get_frbpoppy_data():
"""Get frbpoppy data."""
surv_pop = unpickle('cosmic_chime')
    # Split population into seemingly one-off and repeater populations
mask = ((~np.isnan(surv_pop.frbs.time)).sum(1) > 1)
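    # A source with more than one non-NaN burst time is treated as a repeater.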
pop_ngt1, pop_nle1 = split_pop(surv_pop, mask)
pop_ngt1.name += ' (> 1 burst)'
pop_nle1.name += ' (1 burst)'
# Limit to population above S/N limits
mask = (pop_ngt1.frbs.snr > SNR_LIMIT_REPS)
pop_ngt1.frbs.apply(mask)
mask = (pop_nle1.frbs.snr > SNR_LIMIT_ONE_OFFS)
pop_nle1.frbs.apply(mask)
print(f'{surv_pop.n_repeaters()} repeaters')
print(f'{surv_pop.n_one_offs()} one-offs')
frbpop = {'r': {}, 'o': {}}
for i, pop in enumerate((pop_ngt1, pop_nle1)):
t = 'o'
if i == 0:
t = 'r'
frbpop[t]['dm'] = pop.frbs.dm
# Take only the first snr
frbpop[t]['snr'] = pop.frbs.snr[:, 0]
return frbpop
def plot(frbcat, frbpop):
"""Plot distributions."""
# Change working directory
plot_aa_style(cols=2)
f, axes = plt.subplots(2, 2, sharex='col', sharey='row')
axes[1, 0].set_xlabel(r'DM ($\textrm{pc}\ \textrm{cm}^{-3}$)')
axes[1, 1].set_xlabel(r'S/N')
axes[1, 1].set_xscale('log')
axes[1, 0].set_ylabel('Fraction')
axes[1, 0].set_yscale('log')
axes[1, 0].set_ylim(3e-2, 1.2e0)
axes[0, 0].set_ylabel('Fraction')
axes[0, 0].set_yscale('log')
axes[0, 0].set_ylim(3e-2, 1.2e0)
# Set colours
cmap = plt.get_cmap('tab10')([0, 1])
# Plot dm distribution
for i, p in enumerate((frbcat, frbpop)):
for t in ['r', 'o']:
# Line style
linestyle = 'solid'
label = 'one-offs'
alpha = 1
a = 0
if t == 'r':
linestyle = 'dashed'
label = 'repeaters'
a = 1
n_bins = 40
if len(p[t]['dm']) < 20:
n_bins = 10
bins = np.linspace(0, 2000, n_bins)
axes[a, 0].step(*hist(p[t]['dm'], norm='max', bins=bins),
where='mid', linestyle=linestyle, label=label,
color=cmap[i], alpha=alpha)
# Plot SNR distribution
bins = np.logspace(0.8, 3.5, n_bins)
axes[a, 1].step(*hist(p[t]['snr'], norm='max', bins=bins),
where='mid', linestyle=linestyle, label=label,
color=cmap[i], alpha=alpha)
for t in ['r', 'o']:
for p in ('dm', 'snr'):
row = 0
col = 0
if p == 'snr':
col = 1
if t == 'r':
row = 1
ks = ks_2samp(frbpop[t][p], frbcat[t][p])
print(t, p, ks)
text = fr'$p={round(ks[1], 2)}$'
if ks[1] < 0.01:
# text = r'$p < 0.01$'
text = fr'$p={round(ks[1], 3)}$'
anchored_text = AnchoredText(text, loc='upper right',
borderpad=0.5, frameon=False)
axes[row, col].add_artist(anchored_text)
# Set up layout options
f.subplots_adjust(hspace=0)
f.subplots_adjust(wspace=0.07)
# Add legend elements
elements = []
def patch(color):
return Patch(facecolor=color, edgecolor=color)
elements.append((patch(cmap[0]), 'Frbcat'))
elements.append((patch(cmap[1]), 'Frbpoppy'))
# Add line styles
elements.append((Line2D([0], [0], color='gray'), 'One-offs'))
elements.append((Line2D([0], [0], color='gray', linestyle='dashed'),
'Repeaters'))
lines, labels = zip(*elements)
lgd = plt.figlegend(lines, labels, loc='upper center', ncol=4,
framealpha=1, bbox_to_anchor=(0.485, 1.04),
columnspacing=1.1, handletextpad=0.3)
path = rel_path('./plots/frbpoppy_chime.pdf')
plt.savefig(path, bbox_extra_artists=(lgd,), bbox_inches='tight')
# Check p-value above S/N 15
for t in ['r', 'o']:
mask_frbpop = (frbpop[t]['snr'] > 15)
mask_frbcat = (frbcat[t]['snr'] > 15)
for par in ['dm', 'snr']:
ks = ks_2samp(frbpop[t][par][mask_frbpop],
frbcat[t][par][mask_frbcat])
print(t, par, ks, len(frbpop[t][par][mask_frbpop]),
len(frbcat[t][par][mask_frbcat]))
if __name__ == '__main__':
frbcat = get_frbcat_data()
frbpop = get_frbpoppy_data()
plot(frbcat, frbpop)
| mit |
alliemacleay/MachineLearning_CS6140 | Tests/hw4_tests.py | 1 | 4429 | from sklearn.metrics import roc_auc_score
__author__ = 'Allison MacLeay'
import CS6140_A_MacLeay.utils.Adaboost as adab
import CS6140_A_MacLeay.utils as utils
import CS6140_A_MacLeay.Homeworks.HW4 as decTree
import CS6140_A_MacLeay.Homeworks.HW3 as hw3
import CS6140_A_MacLeay.Homeworks.hw4 as hw4
import CS6140_A_MacLeay.Homeworks.HW4.plots as plt
import CS6140_A_MacLeay.Homeworks.HW4.data_load as dl
from sklearn import tree
from sklearn.datasets import load_iris, make_classification
import numpy as np
import os
def UnitTests():
#AdaboostErrorTest()
#AdaboostWrongTest()
#TestAbstract()
#changeWeight()
TreeTest2()
#TreeTest()
#testPlot()
#testBranchOptimal()
#dataloads()
def dataloads():
crx_data()
dl.data_q4()
def testPlot():
directory = '/Users/Admin/Dropbox/ML/MachineLearning_CS6140/CS6140_A_MacLeay/Homeworks'
path= os.path.join(directory, 'test.pdf')
plot = plt.Errors([[1,2,3]]).plot_all_errors(path)
def TestAbstract():
d = get_test_always_right()
ada = adab.AdaboostOptimal(1)
ada.run(d)
ada.print_stats()
def TreeTest():
spamDat = spamData()
k = 10
all_folds = hw3.partition_folds(spamDat, k)
num_in_fold = []
err_in_fold = []
for i in range(len(all_folds) - 1):
spam = all_folds[i]
num_in_fold.append(len(spam))
truth, f_data = decTree.split_truth_from_data(spam)
tree = decTree.TreeOptimal(max_depth=2)
#tree = decTree.TreeRandom()
tree.fit(f_data, truth)
print 'Prediction...\n'
predict = tree.predict(f_data)
print predict
print truth
error = 1. - hw3.get_accuracy(predict, truth)
err_in_fold.append(error)
print 'Tree error is: {}'.format(error)
spam = all_folds[k -1]
truth, f_data = decTree.split_truth_from_data(spam)
tree = decTree.TreeOptimal(max_depth=2)
#tree = decTree.TreeRandom()
tree.fit(f_data, truth)
predict = tree.predict(f_data)
error = 1. - hw3.get_accuracy(predict, truth)
sum_training_err = 0
for i in range(len(num_in_fold)):
sum_training_err += err_in_fold[i]
#sum_training_err += float(err_in_fold)/num_in_fold
average_training_error = float(sum_training_err)/len(num_in_fold)
print 'Average training error: {}\nAverage testing error: {}'.format(average_training_error, error)
def TreeTest2():
iris = load_iris()
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, y)
print(roc_auc_score(y, clf.predict(X)))
clf2 = decTree.TreeOptimal()
clf2.fit(X, y)
print(roc_auc_score(y, clf2.predict(X)))
def testBranchOptimal():
data, truth = get_test_theta()
branch = decTree.BranchOptimal(data, truth, np.ones(len(data)))
theta = branch.choose_theta(data, truth)
if theta != 5.5:
print 'Optimal is broken! {} != 5.5'.format(theta)
else:
print 'Optimal works'
def AdaboostErrorTest():
print 'Always right'
spamData = get_test_always_right()
adaboost_run(spamData)
def AdaboostWrongTest():
print 'Always wrong'
d = get_test_always_wrong()
adaboost_run(d)
def changeWeight():
d = get_test_half_right()
adaboost_run(d, 3)
def adaboost_run(data, num_rounds=2):
adaboost = adab.AdaboostOptimal(num_rounds)
adaboost.run(data)
adaboost.print_stats()
def get_test_always_right():
d = np.ones(shape=(100, 2))
return d
def get_test_theta():
d = [10, 8, 8, 2, 2, 3, 0, 0, 0]
y = [-1, -1, -1, 1, 1, 1, -1, -1, -1]
return d, y
def get_test_always_wrong():
d = np.zeros(shape=(100, 2))
return d
def get_test_half_right():
d = np.ones(shape=(100, 2))
for i in range(len(d)/2):
d[i][-1] = 0
#print d
return d
def testData():
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
return X, y
def spamData():
return hw3.pandas_to_data(hw3.load_and_normalize_spambase())
def crx_data():
dl.data_q3_crx()
dl.data_q3_vote()
if __name__ == '__main__':
#decTree.q1()
#hw4.q1()
#UnitTests()
#hw4.q2()
#hw4.q3()
hw4.q4()
#hw4.q6()
#hw4.q7()
| mit |
eriklindernoren/ML-From-Scratch | mlfromscratch/unsupervised_learning/gaussian_mixture_model.py | 1 | 4723 | from __future__ import division, print_function
import math
from sklearn import datasets
import numpy as np
from mlfromscratch.utils import normalize, euclidean_distance, calculate_covariance_matrix
from mlfromscratch.utils import Plot
class GaussianMixtureModel():
"""A probabilistic clustering method for determining groupings among data samples.
Parameters:
-----------
k: int
The number of clusters the algorithm will form.
max_iterations: int
The number of iterations the algorithm will run for if it does
not converge before that.
tolerance: float
If the difference of the results from one iteration to the next is
smaller than this value we will say that the algorithm has converged.
"""
def __init__(self, k=2, max_iterations=2000, tolerance=1e-8):
self.k = k
self.parameters = []
self.max_iterations = max_iterations
self.tolerance = tolerance
self.responsibilities = []
self.sample_assignments = None
self.responsibility = None
def _init_random_gaussians(self, X):
""" Initialize gaussian randomly """
n_samples = np.shape(X)[0]
self.priors = (1 / self.k) * np.ones(self.k)
for i in range(self.k):
params = {}
params["mean"] = X[np.random.choice(range(n_samples))]
params["cov"] = calculate_covariance_matrix(X)
self.parameters.append(params)
def multivariate_gaussian(self, X, params):
""" Likelihood """
n_features = np.shape(X)[1]
mean = params["mean"]
covar = params["cov"]
determinant = np.linalg.det(covar)
likelihoods = np.zeros(np.shape(X)[0])
for i, sample in enumerate(X):
d = n_features # dimension
coeff = (1.0 / (math.pow((2.0 * math.pi), d / 2)
* math.sqrt(determinant)))
exponent = math.exp(-0.5 * (sample - mean).T.dot(np.linalg.pinv(covar)).dot((sample - mean)))
likelihoods[i] = coeff * exponent
return likelihoods
def _get_likelihoods(self, X):
""" Calculate the likelihood over all samples """
n_samples = np.shape(X)[0]
likelihoods = np.zeros((n_samples, self.k))
for i in range(self.k):
likelihoods[
:, i] = self.multivariate_gaussian(
X, self.parameters[i])
return likelihoods
def _expectation(self, X):
""" Calculate the responsibility """
# Calculate probabilities of X belonging to the different clusters
weighted_likelihoods = self._get_likelihoods(X) * self.priors
sum_likelihoods = np.expand_dims(
np.sum(weighted_likelihoods, axis=1), axis=1)
# Determine responsibility as P(X|y)*P(y)/P(X)
self.responsibility = weighted_likelihoods / sum_likelihoods
# Assign samples to cluster that has largest probability
self.sample_assignments = self.responsibility.argmax(axis=1)
# Save value for convergence check
self.responsibilities.append(np.max(self.responsibility, axis=1))
def _maximization(self, X):
""" Update the parameters and priors """
# Iterate through clusters and recalculate mean and covariance
for i in range(self.k):
resp = np.expand_dims(self.responsibility[:, i], axis=1)
mean = (resp * X).sum(axis=0) / resp.sum()
covariance = (X - mean).T.dot((X - mean) * resp) / resp.sum()
self.parameters[i]["mean"], self.parameters[
i]["cov"] = mean, covariance
# Update weights
n_samples = np.shape(X)[0]
self.priors = self.responsibility.sum(axis=0) / n_samples
def _converged(self, X):
""" Covergence if || likehood - last_likelihood || < tolerance """
if len(self.responsibilities) < 2:
return False
diff = np.linalg.norm(
self.responsibilities[-1] - self.responsibilities[-2])
# print ("Likelihood update: %s (tol: %s)" % (diff, self.tolerance))
return diff <= self.tolerance
def predict(self, X):
""" Run GMM and return the cluster indices """
# Initialize the gaussians randomly
self._init_random_gaussians(X)
# Run EM until convergence or for max iterations
for _ in range(self.max_iterations):
self._expectation(X) # E-step
self._maximization(X) # M-step
# Check convergence
if self._converged(X):
break
# Make new assignments and return them
self._expectation(X)
return self.sample_assignments
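# Example usage -- an illustrative sketch (not part of the original module):
# cluster a small synthetic two-blob dataset and print the inferred labels.
if __name__ == "__main__":
    np.random.seed(0)
    blob_a = np.random.randn(50, 2)          # cluster centred near (0, 0)
    blob_b = np.random.randn(50, 2) + 5.0    # cluster centred near (5, 5)
    X = np.vstack([blob_a, blob_b])
    gmm = GaussianMixtureModel(k=2, max_iterations=500)
    assignments = gmm.predict(X)             # cluster index per sample
    print(assignments)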
| mit |
krzysz00/momms | results/plot_multi_shape_experiment.py | 1 | 1722 | #!/usr/bin/env python3
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
from sys import argv, stderr, exit
from math import ceil
plt.rcParams["figure.figsize"] = (9,7)
if len(argv) < 4:
print("{}: [data file] [algorithm name] [plot title] [[file]]".format(argv[0]),
file=stderr)
exit(1)
narrow_exper = pd.read_csv(argv[1], sep='\t', comment='#',
float_precision="high", header=None,
names=["m", "n", "k", "l",
argv[2], "Pair of gemm()",
"error"])
NARROW_DIM = 252
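# idxmax over the boolean frame picks, per row, the first of m/n/k/l equal to
# NARROW_DIM, i.e. which dimension was held narrow in that experiment.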
narrow_exper["Narrowed Dim."] = (narrow_exper[["m", "n", "k", "l"]] == NARROW_DIM).idxmax(axis=1)
narrow_exper["N"] = narrow_exper.apply(lambda row:
row["k"] if row["Narrowed Dim."] == "m"
else row["m"], axis=1)
narrow_exper2 = narrow_exper.copy()
narrow_exper2.set_index(["Narrowed Dim.", "N"], inplace=True)
narrow_exper2.drop(["m", "n", "k", "l", "error"], axis=1, inplace=True)
fig = plt.figure()
fig.suptitle(argv[3])
i = 1
for name, group in narrow_exper2.groupby(level="Narrowed Dim."):
ax = fig.add_subplot(220 + i)
group2 = group.copy()
group2.index = group2.index.droplevel()
x_max = int(ceil(group2.index[-1] / 1000.0)) * 1000
group2.plot(ax=ax, title="Narrow {}".format(name),
xlim=(0, x_max), ylim=(0, 56),
style=['r.', 'c+'])
ax.set_xlabel("N (large dimensions)")
ax.set_ylabel("GFlops/s")
i = i + 1
fig.tight_layout()
fig.subplots_adjust(top=0.9)
if len(argv) < 5:
plt.show()
else:
plt.savefig(argv[4])
| bsd-3-clause |
jart/tensorflow | tensorflow/contrib/factorization/python/ops/kmeans.py | 12 | 20349 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A canned Estimator for k-means clustering."""
# TODO(ccolby): Move clustering_ops.py into this file and streamline the code.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.factorization.python.ops import clustering_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output
from tensorflow.python.feature_column import feature_column as fc
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class _LossRelativeChangeHook(session_run_hook.SessionRunHook):
"""Stops when the change in loss goes below a tolerance."""
def __init__(self, loss_tensor, tolerance):
"""Creates a _LossRelativeChangeHook.
Args:
loss_tensor: A scalar tensor of the loss value.
tolerance: A relative tolerance of loss change between iterations.
"""
self._loss_tensor = loss_tensor
self._tolerance = tolerance
self._prev_loss = None
def before_run(self, run_context):
del run_context # unused
return session_run_hook.SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
loss = run_values.results
assert loss is not None
if self._prev_loss:
relative_change = (
abs(loss - self._prev_loss) / (1 + abs(self._prev_loss)))
if relative_change < self._tolerance:
run_context.request_stop()
self._prev_loss = loss
class _InitializeClustersHook(session_run_hook.SessionRunHook):
"""Initializes the cluster centers.
The chief repeatedly invokes an initialization op until all cluster centers
are initialized. The workers wait for the initialization phase to complete.
"""
def __init__(self, init_op, is_initialized_var, is_chief):
"""Creates an _InitializeClustersHook.
Args:
init_op: An op that, when run, will choose some initial cluster centers.
This op may need to be run multiple times to choose all the centers.
is_initialized_var: A boolean variable reporting whether all initial
centers have been chosen.
is_chief: A boolean specifying whether this task is the chief.
"""
self._init_op = init_op
self._is_initialized_var = is_initialized_var
self._is_chief = is_chief
def after_create_session(self, session, coord):
del coord # unused
assert self._init_op.graph is ops.get_default_graph()
assert self._is_initialized_var.graph is self._init_op.graph
while True:
try:
if session.run(self._is_initialized_var):
break
elif self._is_chief:
session.run(self._init_op)
else:
time.sleep(1)
except RuntimeError as e:
logging.info(e)
def _parse_features_if_necessary(features, feature_columns):
"""Helper function to convert the input points into a usable format.
Args:
features: The input features.
    feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column instances
that can be passed to `tf.feature_column.input_layer`. If this is None,
all features will be used.
Returns:
If `features` is a dict of `k` features (optionally filtered by
`feature_columns`), each of which is a vector of `n` scalars, the return
value is a Tensor of shape `(n, k)` representing `n` input points, where the
items in the `k` dimension are sorted lexicographically by `features` key.
If `features` is not a dict, it is returned unmodified.
"""
if not isinstance(features, dict):
return features
if feature_columns:
return fc.input_layer(features, feature_columns)
keys = sorted(features.keys())
with ops.colocate_with(features[keys[0]]):
return array_ops.concat([features[k] for k in keys], axis=1)
class _ModelFn(object):
"""Model function for the estimator."""
def __init__(self, num_clusters, initial_clusters, distance_metric,
random_seed, use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns):
self._num_clusters = num_clusters
self._initial_clusters = initial_clusters
self._distance_metric = distance_metric
self._random_seed = random_seed
self._use_mini_batch = use_mini_batch
self._mini_batch_steps_per_iteration = mini_batch_steps_per_iteration
self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
self._relative_tolerance = relative_tolerance
self._feature_columns = feature_columns
def model_fn(self, features, mode, config):
"""Model function for the estimator.
Note that this does not take a `labels` arg. This works, but `input_fn` must
return either `features` or, equivalently, `(features, None)`.
Args:
features: The input points. See @{tf.estimator.Estimator}.
mode: See @{tf.estimator.Estimator}.
config: See @{tf.estimator.Estimator}.
Returns:
A @{tf.estimator.EstimatorSpec} (see @{tf.estimator.Estimator}) specifying
this behavior:
* `train_op`: Execute one mini-batch or full-batch run of Lloyd's
algorithm.
* `loss`: The sum of the squared distances from each input point to its
closest center.
* `eval_metric_ops`: Maps `SCORE` to `loss`.
* `predictions`: Maps `ALL_DISTANCES` to the distance from each input
point to each cluster center; maps `CLUSTER_INDEX` to the index of
the closest cluster center for each input point.
"""
# input_points is a single Tensor. Therefore, the sharding functionality
# in clustering_ops is unused, and some of the values below are lists of a
# single item.
input_points = _parse_features_if_necessary(features, self._feature_columns)
# Let N = the number of input_points.
# all_distances: A list of one matrix of shape (N, num_clusters). Each value
# is the distance from an input point to a cluster center.
# model_predictions: A list of one vector of shape (N). Each value is the
# cluster id of an input point.
# losses: Similar to cluster_idx but provides the distance to the cluster
# center.
# is_initialized: scalar indicating whether the initial cluster centers
# have been chosen; see init_op.
# cluster_centers_var: a Variable containing the cluster centers.
# init_op: an op to choose the initial cluster centers. A single worker
# repeatedly executes init_op until is_initialized becomes True.
# training_op: an op that runs an iteration of training, either an entire
# Lloyd iteration or a mini-batch of a Lloyd iteration. Multiple workers
# may execute this op, but only after is_initialized becomes True.
(all_distances, model_predictions, losses, is_initialized, init_op,
training_op) = clustering_ops.KMeans(
inputs=input_points,
num_clusters=self._num_clusters,
initial_clusters=self._initial_clusters,
distance_metric=self._distance_metric,
use_mini_batch=self._use_mini_batch,
mini_batch_steps_per_iteration=self._mini_batch_steps_per_iteration,
random_seed=self._random_seed,
kmeans_plus_plus_num_retries=self._kmeans_plus_plus_num_retries
).training_graph()
loss = math_ops.reduce_sum(losses)
summary.scalar('loss/raw', loss)
incr_step = state_ops.assign_add(training_util.get_global_step(), 1)
training_op = control_flow_ops.with_dependencies([training_op, incr_step],
loss)
training_hooks = [
_InitializeClustersHook(init_op, is_initialized, config.is_chief)
]
if self._relative_tolerance is not None:
training_hooks.append(
_LossRelativeChangeHook(loss, self._relative_tolerance))
export_outputs = {
KMeansClustering.ALL_DISTANCES:
export_output.PredictOutput(all_distances[0]),
KMeansClustering.CLUSTER_INDEX:
export_output.PredictOutput(model_predictions[0]),
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.PredictOutput(model_predictions[0])
}
return model_fn_lib.EstimatorSpec(
mode=mode,
predictions={
KMeansClustering.ALL_DISTANCES: all_distances[0],
KMeansClustering.CLUSTER_INDEX: model_predictions[0],
},
loss=loss,
train_op=training_op,
eval_metric_ops={KMeansClustering.SCORE: metrics.mean(loss)},
training_hooks=training_hooks,
export_outputs=export_outputs)
# TODO(agarwal,ands): support sharded input.
class KMeansClustering(estimator.Estimator):
"""An Estimator for K-Means clustering.
Example:
```
import numpy as np
import tensorflow as tf
num_points = 100
dimensions = 2
points = np.random.uniform(0, 1000, [num_points, dimensions])
def input_fn():
return tf.train.limit_epochs(
tf.convert_to_tensor(points, dtype=tf.float32), num_epochs=1)
num_clusters = 5
kmeans = tf.contrib.factorization.KMeansClustering(
num_clusters=num_clusters, use_mini_batch=False)
# train
num_iterations = 10
previous_centers = None
for _ in xrange(num_iterations):
kmeans.train(input_fn)
cluster_centers = kmeans.cluster_centers()
if previous_centers is not None:
print 'delta:', cluster_centers - previous_centers
previous_centers = cluster_centers
print 'score:', kmeans.score(input_fn)
print 'cluster centers:', cluster_centers
# map the input points to their clusters
cluster_indices = list(kmeans.predict_cluster_index(input_fn))
for i, point in enumerate(points):
cluster_index = cluster_indices[i]
center = cluster_centers[cluster_index]
print 'point:', point, 'is in cluster', cluster_index, 'centered at', center
```
The `SavedModel` saved by the `export_savedmodel` method does not include the
cluster centers. However, the cluster centers may be retrieved by the
latest checkpoint saved during training. Specifically,
```
kmeans.cluster_centers()
```
is equivalent to
```
tf.train.load_variable(
kmeans.model_dir, KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
```
"""
# Valid values for the distance_metric constructor argument.
SQUARED_EUCLIDEAN_DISTANCE = clustering_ops.SQUARED_EUCLIDEAN_DISTANCE
COSINE_DISTANCE = clustering_ops.COSINE_DISTANCE
# Values for initial_clusters constructor argument.
RANDOM_INIT = clustering_ops.RANDOM_INIT
KMEANS_PLUS_PLUS_INIT = clustering_ops.KMEANS_PLUS_PLUS_INIT
# Metric returned by evaluate(): The sum of the squared distances from each
# input point to its closest center.
SCORE = 'score'
# Keys returned by predict().
# ALL_DISTANCES: The distance from each input point to each cluster center.
# CLUSTER_INDEX: The index of the closest cluster center for each input point.
CLUSTER_INDEX = 'cluster_index'
ALL_DISTANCES = 'all_distances'
# Variable name used by cluster_centers().
CLUSTER_CENTERS_VAR_NAME = clustering_ops.CLUSTERS_VAR_NAME
def __init__(self,
num_clusters,
model_dir=None,
initial_clusters=RANDOM_INIT,
distance_metric=SQUARED_EUCLIDEAN_DISTANCE,
random_seed=0,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=2,
relative_tolerance=None,
config=None,
feature_columns=None):
"""Creates an Estimator for running KMeans training and inference.
This Estimator implements the following variants of the K-means algorithm:
If `use_mini_batch` is False, it runs standard full batch K-means. Each
training step runs a single iteration of K-Means and must process the full
input at once. To run in this mode, the `input_fn` passed to `train` must
return the entire input dataset.
If `use_mini_batch` is True, it runs a generalization of the mini-batch
K-means algorithm. It runs multiple iterations, where each iteration is
composed of `mini_batch_steps_per_iteration` steps. Each training step
accumulates the contribution from one mini-batch into temporary storage.
Every `mini_batch_steps_per_iteration` steps, the cluster centers are
updated and the temporary storage cleared for the next iteration. Note
that:
* If `mini_batch_steps_per_iteration=1`, the algorithm reduces to the
standard K-means mini-batch algorithm.
* If `mini_batch_steps_per_iteration = num_inputs / batch_size`, the
algorithm becomes an asynchronous version of the full-batch algorithm.
However, there is no guarantee by this implementation that each input
is seen exactly once per iteration. Also, different updates are applied
asynchronously without locking. So this asynchronous version may not
behave exactly like a full-batch version.
Args:
num_clusters: An integer tensor specifying the number of clusters. This
argument is ignored if `initial_clusters` is a tensor or numpy array.
model_dir: The directory to save the model results and log files.
initial_clusters: Specifies how the initial cluster centers are chosen.
One of the following:
* a tensor or numpy array with the initial cluster centers.
* a callable `f(inputs, k)` that selects and returns up to `k` centers
from an input batch. `f` is free to return any number of centers
from `0` to `k`. It will be invoked on successive input batches
as necessary until all `num_clusters` centers are chosen.
* `KMeansClustering.RANDOM_INIT`: Choose centers randomly from an input
batch. If the batch size is less than `num_clusters` then the
entire batch is chosen to be initial cluster centers and the
remaining centers are chosen from successive input batches.
* `KMeansClustering.KMEANS_PLUS_PLUS_INIT`: Use kmeans++ to choose
centers from the first input batch. If the batch size is less
than `num_clusters`, a TensorFlow runtime error occurs.
distance_metric: The distance metric used for clustering. One of:
* `KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`: Euclidean distance
between vectors `u` and `v` is defined as \\(||u - v||_2\\)
which is the square root of the sum of the absolute squares of
the elements' difference.
* `KMeansClustering.COSINE_DISTANCE`: Cosine distance between vectors
`u` and `v` is defined as \\(1 - (u . v) / (||u||_2 ||v||_2)\\).
random_seed: Python integer. Seed for PRNG used to initialize centers.
use_mini_batch: A boolean specifying whether to use the mini-batch k-means
algorithm. See explanation above.
mini_batch_steps_per_iteration: The number of steps after which the
updated cluster centers are synced back to a master copy. Used only if
`use_mini_batch=True`. See explanation above.
kmeans_plus_plus_num_retries: For each point that is sampled during
kmeans++ initialization, this parameter specifies the number of
additional points to draw from the current distribution before selecting
the best. If a negative value is specified, a heuristic is used to
sample `O(log(num_to_sample))` additional points. Used only if
`initial_clusters=KMeansClustering.KMEANS_PLUS_PLUS_INIT`.
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
This may not work correctly if `use_mini_batch=True`.
config: See @{tf.estimator.Estimator}.
      feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column
instances that can be passed to `tf.feature_column.input_layer`. If this
is None, all features will be used.
Raises:
ValueError: An invalid argument was passed to `initial_clusters` or
`distance_metric`.
"""
if isinstance(initial_clusters, str) and initial_clusters not in [
KMeansClustering.RANDOM_INIT, KMeansClustering.KMEANS_PLUS_PLUS_INIT
]:
raise ValueError(
"Unsupported initialization algorithm '%s'" % initial_clusters)
if distance_metric not in [
KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
KMeansClustering.COSINE_DISTANCE
]:
raise ValueError("Unsupported distance metric '%s'" % distance_metric)
super(KMeansClustering, self).__init__(
model_fn=_ModelFn(
num_clusters, initial_clusters, distance_metric, random_seed,
use_mini_batch, mini_batch_steps_per_iteration,
kmeans_plus_plus_num_retries, relative_tolerance,
feature_columns).model_fn,
model_dir=model_dir,
config=config)
def _predict_one_key(self, input_fn, predict_key):
for result in self.predict(input_fn=input_fn, predict_keys=[predict_key]):
yield result[predict_key]
def predict_cluster_index(self, input_fn):
"""Finds the index of the closest cluster center to each input point.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.predict}.
Yields:
The index of the closest cluster center for each input point.
"""
for index in self._predict_one_key(input_fn,
KMeansClustering.CLUSTER_INDEX):
yield index
def score(self, input_fn):
"""Returns the sum of squared distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative sum.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.evaluate}. Only one
batch is retrieved.
Returns:
The sum of the squared distance from each point in the first batch of
inputs to its nearest cluster center.
"""
return self.evaluate(input_fn=input_fn, steps=1)[KMeansClustering.SCORE]
def transform(self, input_fn):
"""Transforms each input point to its distances to all cluster centers.
Note that if `distance_metric=KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE`,
this
function returns the squared Euclidean distance while the corresponding
sklearn function returns the Euclidean distance.
Args:
input_fn: Input points. See @{tf.estimator.Estimator.predict}.
Yields:
The distances from each input point to each cluster center.
"""
for distances in self._predict_one_key(input_fn,
KMeansClustering.ALL_DISTANCES):
yield distances
def cluster_centers(self):
"""Returns the cluster centers."""
return self.get_variable_value(KMeansClustering.CLUSTER_CENTERS_VAR_NAME)
| apache-2.0 |
toobaz/pandas | pandas/tests/arrays/categorical/test_constructors.py | 2 | 23359 | from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
NaT,
Series,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas.util.testing as tm
class TestCategoricalConstructors:
def test_validate_ordered(self):
# see gh-14058
exp_msg = "'ordered' must either be 'True' or 'False'"
exp_err = TypeError
# This should be a boolean.
ordered = np.array([0, 1, 2])
with pytest.raises(exp_err, match=exp_msg):
Categorical([1, 2, 3], ordered=ordered)
with pytest.raises(exp_err, match=exp_msg):
Categorical.from_codes(
[0, 0, 1], categories=["a", "b", "c"], ordered=ordered
)
def test_constructor_empty(self):
# GH 17248
c = Categorical([])
expected = Index([])
tm.assert_index_equal(c.categories, expected)
c = Categorical([], categories=[1, 2, 3])
expected = pd.Int64Index([1, 2, 3])
tm.assert_index_equal(c.categories, expected)
def test_constructor_empty_boolean(self):
# see gh-22702
cat = pd.Categorical([], categories=[True, False])
categories = sorted(cat.categories.tolist())
assert categories == [False, True]
def test_constructor_tuples(self):
values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object)
result = Categorical(values)
expected = Index([(1,), (1, 2)], tupleize_cols=False)
tm.assert_index_equal(result.categories, expected)
assert result.ordered is False
def test_constructor_tuples_datetimes(self):
# numpy will auto reshape when all of the tuples are the
# same len, so add an extra one with 2 items and slice it off
values = np.array(
[
(Timestamp("2010-01-01"),),
(Timestamp("2010-01-02"),),
(Timestamp("2010-01-01"),),
(Timestamp("2010-01-02"),),
("a", "b"),
],
dtype=object,
)[:-1]
result = Categorical(values)
expected = Index(
[(Timestamp("2010-01-01"),), (Timestamp("2010-01-02"),)],
tupleize_cols=False,
)
tm.assert_index_equal(result.categories, expected)
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype="O")
factor = Categorical(arr, ordered=False)
assert not factor.ordered
# this however will raise as cannot be sorted
msg = (
"'values' is not ordered, please explicitly specify the "
"categories order by passing in a categories argument."
)
with pytest.raises(TypeError, match=msg):
Categorical(arr, ordered=True)
def test_constructor_interval(self):
result = Categorical(
[Interval(1, 2), Interval(2, 3), Interval(3, 6)], ordered=True
)
ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)])
exp = Categorical(ii, ordered=True)
tm.assert_categorical_equal(result, exp)
tm.assert_index_equal(result.categories, ii)
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_)
c1 = Categorical(exp_arr)
tm.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
tm.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
msg = "Categorical categories must be unique"
with pytest.raises(ValueError, match=msg):
Categorical([1, 2], [1, 2, 2])
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ["a", "b", "b"])
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
assert not c1.ordered
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
tm.assert_numpy_array_equal(c1.__array__(), c2.__array__())
tm.assert_index_equal(c2.categories, Index(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
tm.assert_categorical_equal(c1, c2)
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
tm.assert_categorical_equal(c1, c2)
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
tm.assert_categorical_equal(c1, c2)
# This should result in integer categories, not float!
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
assert is_integer_dtype(cat.categories)
# https://github.com/pandas-dev/pandas/issues/3678
cat = Categorical([np.nan, 1, 2, 3])
assert is_integer_dtype(cat.categories)
# this should result in floats
cat = Categorical([np.nan, 1, 2.0, 3])
assert is_float_dtype(cat.categories)
cat = Categorical([np.nan, 1.0, 2.0, 3.0])
assert is_float_dtype(cat.categories)
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notna()])
# assert is_integer_dtype(vals)
# corner cases
cat = Categorical([1])
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
cat = Categorical(["a"])
assert len(cat.categories) == 1
assert cat.categories[0] == "a"
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# Scalars should be converted to lists
cat = Categorical(1)
assert len(cat.categories) == 1
assert cat.categories[0] == 1
assert len(cat.codes) == 1
assert cat.codes[0] == 0
# two arrays
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(None):
c_old = Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5]) # noqa
        # the next ones are from the old docs
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical( # noqa
np.array([], dtype="int64"), categories=[3, 2, 1], ordered=True
)
def test_constructor_with_existing_categories(self):
# GH25318: constructing with pd.Series used to bogusly skip recoding
# categories
c0 = Categorical(["a", "b", "c", "a"])
c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"])
c2 = Categorical(c0, categories=c1.categories)
tm.assert_categorical_equal(c1, c2)
c3 = Categorical(Series(c0), categories=c1.categories)
tm.assert_categorical_equal(c1, c3)
def test_constructor_not_sequence(self):
# https://github.com/pandas-dev/pandas/issues/16022
msg = r"^Parameter 'categories' must be list-like, was"
with pytest.raises(TypeError, match=msg):
Categorical(["a", "b"], categories="a")
def test_constructor_with_null(self):
# Cannot have NaN in categories
msg = "Categorial categories cannot be null"
with pytest.raises(ValueError, match=msg):
Categorical([np.nan, "a", "b", "c"], categories=[np.nan, "a", "b", "c"])
with pytest.raises(ValueError, match=msg):
Categorical([None, "a", "b", "c"], categories=[None, "a", "b", "c"])
with pytest.raises(ValueError, match=msg):
Categorical(
DatetimeIndex(["nat", "20160101"]),
categories=[NaT, Timestamp("20160101")],
)
def test_constructor_with_index(self):
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
tm.assert_categorical_equal(ci.values, Categorical(ci))
ci = CategoricalIndex(list("aabbca"), categories=list("cab"))
tm.assert_categorical_equal(
ci.values, Categorical(ci.astype(object), categories=ci.categories)
)
def test_constructor_with_generator(self):
# This was raising an Error in isna(single_val).any() because isna
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical(xrange(3))
tm.assert_categorical_equal(cat, exp)
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ["a", "b", "c"]])
# check that categories accept generators and sequences
cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
tm.assert_categorical_equal(cat, exp)
cat = Categorical([0, 1, 2], categories=xrange(3))
tm.assert_categorical_equal(cat, exp)
@pytest.mark.parametrize(
"dtl",
[
date_range("1995-01-01 00:00:00", periods=5, freq="s"),
date_range("1995-01-01 00:00:00", periods=5, freq="s", tz="US/Eastern"),
timedelta_range("1 day", periods=5, freq="s"),
],
)
def test_constructor_with_datetimelike(self, dtl):
# see gh-12077
# constructor with a datetimelike and NaT
s = Series(dtl)
c = Categorical(s)
expected = type(dtl)(s)
expected.freq = None
tm.assert_index_equal(c.categories, expected)
tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype="int8"))
# with NaT
s2 = s.copy()
s2.iloc[-1] = NaT
c = Categorical(s2)
expected = type(dtl)(s2.dropna())
expected.freq = None
tm.assert_index_equal(c.categories, expected)
exp = np.array([0, 1, 2, 3, -1], dtype=np.int8)
tm.assert_numpy_array_equal(c.codes, exp)
result = repr(c)
assert "NaT" in result
def test_constructor_from_index_series_datetimetz(self):
idx = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern")
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_timedelta(self):
idx = timedelta_range("1 days", freq="D", periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_from_index_series_period(self):
idx = period_range("2015-01-01", freq="D", periods=3)
result = Categorical(idx)
tm.assert_index_equal(result.categories, idx)
result = Categorical(Series(idx))
tm.assert_index_equal(result.categories, idx)
def test_constructor_invariant(self):
# GH 14190
vals = [
np.array([1.0, 1.2, 1.8, np.nan]),
np.array([1, 2, 3], dtype="int64"),
["a", "b", "c", np.nan],
[pd.Period("2014-01"), pd.Period("2014-02"), NaT],
[Timestamp("2014-01-01"), Timestamp("2014-01-02"), NaT],
[
Timestamp("2014-01-01", tz="US/Eastern"),
Timestamp("2014-01-02", tz="US/Eastern"),
NaT,
],
]
for val in vals:
c = Categorical(val)
c2 = Categorical(c)
tm.assert_categorical_equal(c, c2)
@pytest.mark.parametrize("ordered", [True, False])
def test_constructor_with_dtype(self, ordered):
categories = ["b", "a", "c"]
dtype = CategoricalDtype(categories, ordered=ordered)
result = Categorical(["a", "b", "a", "c"], dtype=dtype)
expected = Categorical(
["a", "b", "a", "c"], categories=categories, ordered=ordered
)
tm.assert_categorical_equal(result, expected)
assert result.ordered is ordered
def test_constructor_dtype_and_others_raises(self):
dtype = CategoricalDtype(["a", "b"], ordered=True)
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], categories=["a", "b"], dtype=dtype)
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ordered=True, dtype=dtype)
with pytest.raises(ValueError, match=msg):
Categorical(["a", "b"], ordered=False, dtype=dtype)
@pytest.mark.parametrize("categories", [None, ["a", "b"], ["a", "c"]])
@pytest.mark.parametrize("ordered", [True, False])
def test_constructor_str_category(self, categories, ordered):
result = Categorical(
["a", "b"], categories=categories, ordered=ordered, dtype="category"
)
expected = Categorical(["a", "b"], categories=categories, ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_constructor_str_unknown(self):
with pytest.raises(ValueError, match="Unknown dtype"):
Categorical([1, 2], dtype="foo")
def test_constructor_from_categorical_with_dtype(self):
dtype = CategoricalDtype(["a", "b", "c"], ordered=True)
values = Categorical(["a", "b", "d"])
result = Categorical(values, dtype=dtype)
# We use dtype.categories, not values.categories
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "c"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_with_unknown_dtype(self):
dtype = CategoricalDtype(None, ordered=True)
values = Categorical(["a", "b", "d"])
result = Categorical(values, dtype=dtype)
# We use values.categories, not dtype.categories
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "d"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_constructor_from_categorical_string(self):
values = Categorical(["a", "b", "d"])
# use categories, ordered
result = Categorical(
values, categories=["a", "b", "c"], ordered=True, dtype="category"
)
expected = Categorical(
["a", "b", "d"], categories=["a", "b", "c"], ordered=True
)
tm.assert_categorical_equal(result, expected)
# No string
result = Categorical(values, categories=["a", "b", "c"], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_constructor_with_categorical_categories(self):
# GH17884
expected = Categorical(["a", "b"], categories=["a", "b", "c"])
result = Categorical(["a", "b"], categories=Categorical(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
result = Categorical(["a", "b"], categories=CategoricalIndex(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
def test_from_codes(self):
# too few categories
dtype = CategoricalDtype(categories=[1, 2])
msg = "codes need to be between "
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([1, 2], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([1, 2], dtype=dtype)
# no int codes
msg = "codes need to be array-like integers"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(["a"], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(["a"], dtype=dtype)
# no unique categories
with pytest.raises(ValueError, match="Categorical categories must be unique"):
Categorical.from_codes([0, 1, 2], categories=["a", "a", "b"])
# NaN categories included
with pytest.raises(ValueError, match="Categorial categories cannot be null"):
Categorical.from_codes([0, 1, 2], categories=["a", "b", np.nan])
# too negative
dtype = CategoricalDtype(categories=["a", "b", "c"])
msg = r"codes need to be between -1 and len\(categories\)-1"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([-2, 1, 2], categories=dtype.categories)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([-2, 1, 2], dtype=dtype)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], categories=dtype.categories)
tm.assert_categorical_equal(exp, res)
res = Categorical.from_codes([0, 1, 2], dtype=dtype)
tm.assert_categorical_equal(exp, res)
def test_from_codes_with_categorical_categories(self):
# GH17884
expected = Categorical(["a", "b"], categories=["a", "b", "c"])
result = Categorical.from_codes([0, 1], categories=Categorical(["a", "b", "c"]))
tm.assert_categorical_equal(result, expected)
result = Categorical.from_codes(
[0, 1], categories=CategoricalIndex(["a", "b", "c"])
)
tm.assert_categorical_equal(result, expected)
# non-unique Categorical still raises
with pytest.raises(ValueError, match="Categorical categories must be unique"):
Categorical.from_codes([0, 1], Categorical(["a", "b", "a"]))
def test_from_codes_with_nan_code(self):
# GH21767
codes = [1, 2, np.nan]
dtype = CategoricalDtype(categories=["a", "b", "c"])
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, categories=dtype.categories)
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype=dtype)
def test_from_codes_with_float(self):
# GH21767
codes = [1.0, 2.0, 0] # integer, but in float dtype
dtype = CategoricalDtype(categories=["a", "b", "c"])
with tm.assert_produces_warning(FutureWarning):
cat = Categorical.from_codes(codes, dtype.categories)
tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype="i1"))
with tm.assert_produces_warning(FutureWarning):
cat = Categorical.from_codes(codes, dtype=dtype)
tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype="i1"))
codes = [1.1, 2.0, 0] # non-integer
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype.categories)
with pytest.raises(ValueError, match="codes need to be array-like integers"):
Categorical.from_codes(codes, dtype=dtype)
def test_from_codes_with_dtype_raises(self):
msg = "Cannot specify"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(
[0, 1], categories=["a", "b"], dtype=CategoricalDtype(["a", "b"])
)
with pytest.raises(ValueError, match=msg):
Categorical.from_codes(
[0, 1], ordered=True, dtype=CategoricalDtype(["a", "b"])
)
def test_from_codes_neither(self):
msg = "Both were None"
with pytest.raises(ValueError, match=msg):
Categorical.from_codes([0, 1])
@pytest.mark.parametrize("dtype", [None, "category"])
def test_from_inferred_categories(self, dtype):
cats = ["a", "b"]
codes = np.array([0, 0, 1, 1], dtype="i8")
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes(codes, cats)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, "category"])
def test_from_inferred_categories_sorts(self, dtype):
cats = ["b", "a"]
codes = np.array([0, 1, 1, 1], dtype="i8")
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical.from_codes([1, 0, 0, 0], ["a", "b"])
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_dtype(self):
cats = ["a", "b", "d"]
codes = np.array([0, 1, 0, 2], dtype="i8")
dtype = CategoricalDtype(["c", "b", "a"], ordered=True)
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical(
["a", "b", "a", "d"], categories=["c", "b", "a"], ordered=True
)
tm.assert_categorical_equal(result, expected)
def test_from_inferred_categories_coerces(self):
cats = ["1", "2", "bad"]
codes = np.array([0, 0, 1, 2], dtype="i8")
dtype = CategoricalDtype([1, 2])
result = Categorical._from_inferred_categories(cats, codes, dtype)
expected = Categorical([1, 1, 2, np.nan])
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("ordered", [None, True, False])
def test_construction_with_ordered(self, ordered):
# GH 9347, 9190
cat = Categorical([0, 1, 2], ordered=ordered)
assert cat.ordered == bool(ordered)
@pytest.mark.xfail(reason="Imaginary values not supported in Categorical")
def test_constructor_imaginary(self):
values = [1, 2, 3 + 1j]
c1 = Categorical(values)
tm.assert_index_equal(c1.categories, Index(values))
tm.assert_numpy_array_equal(np.array(c1), np.array(values))
| bsd-3-clause |
stscieisenhamer/glue | glue/core/data_factories/tests/test_data_factories.py | 2 | 10412 | from __future__ import absolute_import, division, print_function
import warnings
import sys
import pytest
import numpy as np
from mock import MagicMock
from numpy.testing import assert_allclose, assert_array_equal
from glue.core.component import CategoricalComponent
from glue.core.data import Data
from glue.core import data_factories as df
from glue.config import data_factory
from glue.tests.helpers import (requires_astropy,
requires_pil_or_skimage, make_file, requires_qt)
def test_load_data_auto_assigns_label():
factory = MagicMock()
result = Data(x=[1, 2, 3], label='')
factory.return_value = result
d = df.load_data('test.fits', factory)
factory.assert_called_once_with('test.fits')
assert d.label == 'test'
def test_extension():
assert df._extension('test.fits') == 'fits'
assert df._extension('test.fits.gz') == 'fits.gz'
assert df._extension('test.fits.gzip') == 'fits.gzip'
assert df._extension('test.fits.bz') == 'fits.bz'
assert df._extension('test.fits.bz2') == 'fits.bz2'
assert df._extension('test.other.names.fits') == 'fits'
def test_data_label():
assert df.data_label('test.fits') == 'test'
assert df.data_label('/Leading/Path/test.fits') == 'test'
assert df.data_label('') == ''
assert df.data_label('/Leading/Path/no_extension') == 'no_extension'
assert df.data_label('no_extension') == 'no_extension'
@requires_pil_or_skimage
def test_grey_png_loader():
# Greyscale PNG
data = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00\x02\x08\x00\x00\x00\x00W\xddR\xf8\x00\x00\x00\x0eIDATx\x9ccdddab\x04\x00\x00&\x00\x0b\x8e`\xe7A\x00\x00\x00\x00IEND\xaeB`\x82'
with make_file(data, '.png') as fname:
d = df.load_data(fname)
assert df.find_factory(fname) is df.img_data
assert_array_equal(d['PRIMARY'], [[3, 4], [1, 2]])
@requires_pil_or_skimage
def test_color_png_loader():
# Colorscale PNG
data = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x02\x00\x00\x00\x02\x08\x02\x00\x00\x00\xfd\xd4\x9as\x00\x00\x00\x15IDAT\x08\xd7\x05\xc1\x01\x01\x00\x00\x00\x80\x10\xffO\x17B\x14\x1a!\xec\x04\xfc\xf2!Q\\\x00\x00\x00\x00IEND\xaeB`\x82'
with make_file(data, '.png') as fname:
d = df.load_data(fname)
assert df.find_factory(fname) is df.img_data
assert_array_equal(d['red'], [[255, 0], [255, 0]])
assert_array_equal(d['green'], [[255, 0], [0, 255]])
assert_array_equal(d['blue'], [[0, 255], [0, 0]])
@pytest.mark.parametrize(('delim', 'suffix'),
((',', '.csv'),
('\t', '.tsv'),
('|', '.txt'),
(' ', '.dat'),
('\t', '.tbl')))
def test_ascii_catalog_factory(delim, suffix):
data = ("#a%sb\n1%s2" % (delim, delim)).encode('ascii')
with make_file(data, suffix) as fname:
d = df.load_data(fname)
assert df.find_factory(fname) is df.tabular_data
assert_array_equal(d['a'], [1])
assert_array_equal(d['b'], [2])
@pytest.mark.parametrize(('delim', 'suffix'),
((',', '.csv'),
('\t', '.tsv'),
('|', '.txt'),
(' ', '.dat'),
('\t', '.tbl')))
def test_pandas_parse_delimiters(delim, suffix):
data = ("a%sb\n1%s2" % (delim, delim)).encode('ascii')
with make_file(data, suffix) as fname:
d = df.load_data(fname, factory=df.pandas_read_table)
assert_array_equal(d['a'], [1])
assert_array_equal(d['b'], [2])
@requires_astropy
def test_csv_gz_factory():
data = b'\x1f\x8b\x08\x08z\x1e}R\x00\x03test.csv\x00\xab\xe02\xe42\xe22\xe6\x02\x00y\xffzx\x08\x00\x00\x00'
with make_file(data, '.csv.gz') as fname:
d = df.load_data(fname)
assert df.find_factory(fname) is df.tabular_data
assert_array_equal(d['x'], [1, 2, 3])
@requires_astropy
def test_sextractor_factory():
data = b"""# 1 NUMBER Running object number
# 2 X_IMAGE Object position along x [pixel]
# 3 Y_IMAGE Object position along y [pixel]
1 2988.249 2.297
2 2373.747 3.776
3 3747.026 4.388"""
with make_file(data, '.cat') as fname:
d = df.load_data(fname, factory=df.sextractor_factory)
assert_allclose(d['NUMBER'], [1, 2, 3])
assert_allclose(d['X_IMAGE'], [2988.249, 2373.747, 3747.026])
assert_allclose(d['Y_IMAGE'], [2.297, 3.776, 4.388])
def test_csv_pandas_factory():
data = b"""a,b,c,d
1,2.1,some,True
2,2.4,categorical,False
3,1.4,data,True
4,4.0,here,True
5,6.3,,False
6,8.7,,False
8,9.2,,True"""
with make_file(data, '.csv') as fname:
d = df.load_data(fname, factory=df.pandas_read_table)
assert d['a'].dtype == np.int64
assert d['b'].dtype == np.float
assert d['c'].dtype == np.float
cat_comp = d.find_component_id('c')
assert isinstance(d.get_component(cat_comp), CategoricalComponent)
correct_cats = np.unique(np.asarray(['some', 'categorical',
'data', 'here',
'', '', '']))
np.testing.assert_equal(d.get_component(cat_comp).categories,
correct_cats)
cat_comp = d.find_component_id('d')
assert isinstance(d.get_component(cat_comp), CategoricalComponent)
def test_dtype_int():
data = b'# a, b\n1, 1 \n2, 2 \n3, 3'
with make_file(data, '.csv') as fname:
d = df.load_data(fname)
assert d['a'].dtype == np.int
def test_dtype_float():
data = b'# a, b\n1., 1 \n2, 2 \n3, 3'
with make_file(data, '.csv') as fname:
d = df.load_data(fname)
assert d['a'].dtype == np.float
def test_dtype_float_on_categorical():
data = b'# a, b\nf, 1 \nr, 2 \nk, 3'
with make_file(data, '.csv') as fname:
d = df.load_data(fname)
assert d['a'].dtype == np.float
def test_dtype_badtext():
data = b'# a, b\nlabel1, 1 \n2, 2 \n3, 3\n4, 4\n5, 5\n6, 6'
with make_file(data, '.csv') as fname:
d = df.load_data(fname)
assert d['a'].dtype == np.float
assert_array_equal(d['a'], [np.nan, 2, 3, 4, 5, 6])
def test_dtype_missing_data_col2():
data = b'# a, b\n1 , 1 \n2, \n3, 3.0'
with make_file(data, '.csv') as fname:
d = df.load_data(fname)
assert d['b'].dtype == np.float
assert_array_equal(d['b'], [1, np.nan, 3])
def test_dtype_missing_data_col1():
data = b'# a, b\n1.0, 1 \n , 2 \n3, 3'
with make_file(data, '.csv') as fname:
d = df.load_data(fname)
assert d['a'].dtype == np.float
assert_array_equal(d['a'], [1, np.nan, 3])
def test_column_spaces():
data = b'#a, b\nhere I go, 1\n2, 3\n3, 4\n5, 6\n7, 8'
with make_file(data, '.csv') as fname:
d = df.load_data(fname)
assert d['a'].dtype == np.float
assert_array_equal(d['a'], [np.nan, 2, 3, 5, 7])
def test_data_reload():
data = b'#a, b\n0, 1\n2, 3\n3, 4\n5, 6\n7, 8'
with make_file(data, '.csv') as fname:
d = df.load_data(fname)
coords_old = d.coords
with open(fname, 'w') as f2:
f2.write('#a, b\n0, 0\n0, 0\n0, 0\n0, 0\n0, 0')
d._load_log.reload()
assert_array_equal(d['a'], [0, 0, 0, 0, 0])
assert_array_equal(d['b'], [0, 0, 0, 0, 0])
assert d.coords is not coords_old
@pytest.mark.skipif(sys.platform.startswith('win'), reason='file deletion doesn\'t work on Windows')
def test_data_reload_no_file():
data = b'#a, b\n0, 1\n2, 3\n3, 4\n5, 6\n7, 8'
with make_file(data, '.csv') as fname:
d = df.load_data(fname)
# file no longer exists
with warnings.catch_warnings(record=True) as w:
d._load_log.reload()
assert len(w) == 1
assert str(w[0].message).startswith('Could not reload')
assert_array_equal(d['a'], [0, 2, 3, 5, 7])
def test_data_reload_shape_change():
data = b'#a, b\n0, 1\n2, 3\n3, 4\n5, 6\n7, 8'
with make_file(data, '.csv') as fname:
d = df.load_data(fname)
coords_old = d.coords
with open(fname, 'w') as f2:
f2.write('#a, b\n0, 0\n0, 0\n0, 0\n0, 0')
with warnings.catch_warnings(record=True) as w:
d._load_log.reload()
assert len(w) == 1
assert str(w[0].message) == 'Cannot refresh data -- data shape changed'
assert_array_equal(d['a'], [0, 2, 3, 5, 7])
assert d.coords is coords_old
# TODO: this doesn't belong in the core since it relies on Qt
@requires_qt
def test_file_watch():
cb = MagicMock()
with make_file(b'test', 'csv') as fname:
fw = df.FileWatcher(fname, cb)
fw.check_for_changes()
assert cb.call_count == 0
# fudge stat_cache to simulate filechange
# we could just change the file, but
# the underlying OS check has low time resolution
# and would require a sleep
fw.stat_cache -= 1
fw.check_for_changes()
assert cb.call_count == 1
@requires_qt
@pytest.mark.skipif(sys.platform.startswith('win'), reason='file deletion doesn\'t work on Windows')
def test_file_watch_os_error():
cb = MagicMock()
with make_file(b'test', 'csv') as fname:
fw = df.FileWatcher(fname, cb)
with warnings.catch_warnings(record=True) as w:
fw.check_for_changes()
assert len(w) == 1
assert str(w[0].message).startswith('Cannot access')
assert cb.call_count == 0
def test_ambiguous_format(tmpdir):
@data_factory('b', identifier=df.has_extension('spam'), priority=34)
def reader1(filename):
return Data()
@data_factory('a', identifier=df.has_extension('spam'), priority=34)
def reader2(filename):
return Data()
@data_factory('c', identifier=df.has_extension('spam'), priority=22)
def reader3(filename):
return Data()
filename = tmpdir.join('test.spam').strpath
with open(filename, 'w') as f:
f.write('Camelot!')
# Should raise a warning and pick the highest priority one in alphabetical
# order
with warnings.catch_warnings(record=True) as w:
factory = df.find_factory(filename)
assert len(w) == 1
assert str(w[0].message) == "Multiple data factories matched the input: 'a', 'b'. Choosing 'a'."
assert factory is reader2
| bsd-3-clause |
ndchorley/scipy | scipy/stats/_discrete_distns.py | 34 | 21220 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from scipy.misc import logsumexp
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
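# Editor's illustration (not part of the original scipy source): a quick
# numerical check of the pmf formula documented above, using only the public
# `scipy.stats.binom` interface.
#
#     >>> from scipy.stats import binom
#     >>> binom.pmf(3, 10, 0.5)      # choose(10, 3) * 0.5**3 * 0.5**7 = 120/1024
#     0.1171875
#     >>> binom.mean(10, 0.5)        # n * p
#     5.0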
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
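# Editor's illustration (not part of the original scipy source): the pmf and
# mean of the geometric distribution documented above, via the public
# `scipy.stats.geom` interface.
#
#     >>> from scipy.stats import geom
#     >>> geom.pmf(3, 0.25)          # (1 - 0.25)**2 * 0.25
#     0.140625
#     >>> geom.mean(0.25)            # 1 / p
#     4.0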
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
def _logsf(self, k, M, n, N):
"""
More precise calculation than log(sf)
"""
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Integration over probability mass function using logsumexp
k2 = np.arange(quant + 1, draw + 1)
res.append(logsumexp(self._logpmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
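# Editor's illustration (not part of the original scipy source): the Poisson
# pmf and its equal mean and variance, via the public `scipy.stats.poisson`
# interface.
#
#     >>> from scipy.stats import poisson
#     >>> round(poisson.pmf(2, 3), 6)    # exp(-3) * 3**2 / 2!
#     0.224042
#     >>> poisson.mean(3), poisson.var(3)
#     (3.0, 3.0)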
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
        boltzmann.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
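# Editor's illustration (not part of the original scipy source): the truncated
# pmf above is normalised over its support k = 0, ..., N-1, which can be
# checked numerically through the public `scipy.stats.boltzmann` interface.
#
#     >>> import numpy as np
#     >>> from scipy.stats import boltzmann
#     >>> np.allclose(boltzmann.pmf(np.arange(5), 0.5, 5).sum(), 1.0)
#     True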
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
    Note the difference from numpy's ``random_integers``, which
    returns integers on a *closed* interval ``[low, high]``.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return self._random_state.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
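# Editor's illustration (not part of the original scipy source): for a = 2 the
# normalising constant is zeta(2) = pi**2 / 6, so pmf(1, 2) = 6 / pi**2.
#
#     >>> from scipy.stats import zipf
#     >>> round(zipf.pmf(1, 2), 6)
#     0.607927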
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
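# Editor's illustration (not part of the original scipy source): the moments
# documented above, mean = mu1 - mu2 and variance = mu1 + mu2, via the public
# `scipy.stats.skellam` interface.
#
#     >>> from scipy.stats import skellam
#     >>> skellam.mean(3, 2), skellam.var(3, 2)
#     (1.0, 5.0)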
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
MJuddBooth/pandas | pandas/core/tools/datetimes.py | 1 | 32195 | from datetime import datetime, time
from functools import partial
import numpy as np
from pandas._libs import tslib, tslibs
from pandas._libs.tslibs import Timestamp, conversion, parsing
from pandas._libs.tslibs.parsing import ( # noqa
DateParseError, _format_is_iso, _guess_datetime_format, parse_time_string)
from pandas._libs.tslibs.strptime import array_strptime
from pandas.compat import zip
from pandas.core.dtypes.common import (
ensure_object, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_float, is_integer, is_integer_dtype,
is_list_like, is_numeric_dtype, is_object_dtype, is_scalar)
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
from pandas.core.dtypes.missing import notna
from pandas import compat
from pandas.core import algorithms
def _guess_datetime_format_for_array(arr, **kwargs):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notna(arr).nonzero()[0]
if len(non_nan_elements):
return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs)
def _maybe_cache(arg, format, cache, convert_listlike):
"""
Create a cache of unique dates from an array of dates
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : boolean
True attempts to create a cache of converted values
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty
"""
from pandas import Series
cache_array = Series()
if cache:
# Perform a quicker unique check
from pandas import Index
if not Index(arg).is_unique:
unique_dates = algorithms.unique(arg)
cache_dates = convert_listlike(unique_dates, True, format)
cache_array = Series(cache_dates, index=unique_dates)
return cache_array
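# Editor's note (not part of the original pandas source): a minimal sketch of
# the caching behaviour that `_maybe_cache` enables through the public
# `pandas.to_datetime` API -- with `cache=True`, duplicate strings are parsed
# only once and then looked up from the cache Series.
#
#     >>> import pandas as pd
#     >>> pd.to_datetime(['2018-01-01', '2018-01-01', '2018-01-02'], cache=True)
#     DatetimeIndex(['2018-01-01', '2018-01-01', '2018-01-02'],
#                   dtype='datetime64[ns]', freq=None)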
def _convert_and_box_cache(arg, cache_array, box, errors, name=None):
"""
Convert array of dates with a cache and box the result
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
box : boolean
True boxes result as an Index-like, False returns an ndarray
errors : string
'ignore' plus box=True will convert result to Index
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : datetime of converted dates
Returns:
- Index-like if box=True
- ndarray if box=False
"""
from pandas import Series, DatetimeIndex, Index
result = Series(arg).map(cache_array)
if box:
if errors == 'ignore':
return Index(result, name=name)
else:
return DatetimeIndex(result, name=name)
return result.values
def _return_parsed_timezone_results(result, timezones, box, tz, name):
"""
Return results from array_strptime if a %z or %Z directive was passed.
Parameters
----------
result : ndarray
int64 date representations of the dates
timezones : ndarray
pytz timezone objects
box : boolean
True boxes result as an Index-like, False returns an ndarray
tz : object
None or pytz timezone object
name : string, default None
Name for a DatetimeIndex
Returns
-------
tz_result : ndarray of parsed dates with timezone
Returns:
- Index-like if box=True
- ndarray of Timestamps if box=False
"""
if tz is not None:
raise ValueError("Cannot pass a tz argument when "
"parsing strings with timezone "
"information.")
tz_results = np.array([Timestamp(res).tz_localize(zone) for res, zone
in zip(result, timezones)])
if box:
from pandas import Index
return Index(tz_results, name=name)
return tz_results
def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
unit=None, errors=None,
infer_datetime_format=None, dayfirst=None,
yearfirst=None, exact=None):
"""
Helper function for to_datetime. Performs the conversions of 1D listlike
of dates
Parameters
----------
arg : list, tuple, ndarray, Series, Index
        date to be parsed
box : boolean
True boxes result as an Index-like, False returns an ndarray
name : object
None or string for the Index name
tz : object
None or 'utc'
unit : string
None or string of the frequency of the passed data
errors : string
        error handling behaviors from to_datetime, 'raise', 'coerce', 'ignore'
infer_datetime_format : boolean
inferring format behavior from to_datetime
dayfirst : boolean
dayfirst parsing behavior from to_datetime
yearfirst : boolean
yearfirst parsing behavior from to_datetime
exact : boolean
exact format matching behavior from to_datetime
Returns
-------
ndarray of parsed dates
Returns:
- Index-like if box=True
- ndarray of Timestamps if box=False
"""
from pandas import DatetimeIndex
from pandas.core.arrays import DatetimeArray
from pandas.core.arrays.datetimes import (
maybe_convert_dtype, objects_to_datetime64ns)
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
# these are shortcutable
if is_datetime64tz_dtype(arg):
if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
return DatetimeIndex(arg, tz=tz, name=name)
if tz == 'utc':
arg = arg.tz_convert(None).tz_localize(tz)
return arg
elif is_datetime64_ns_dtype(arg):
if box and not isinstance(arg, (DatetimeArray, DatetimeIndex)):
try:
return DatetimeIndex(arg, tz=tz, name=name)
except ValueError:
pass
return arg
elif unit is not None:
if format is not None:
raise ValueError("cannot specify both format and unit")
arg = getattr(arg, 'values', arg)
result = tslib.array_with_unit_to_datetime(arg, unit,
errors=errors)
if box:
if errors == 'ignore':
from pandas import Index
result = Index(result, name=name)
# GH 23758: We may still need to localize the result with tz
try:
return result.tz_localize(tz)
except AttributeError:
return result
return DatetimeIndex(result, tz=tz, name=name)
return result
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a string, datetime, list, tuple, '
'1-d array, or Series')
# warn if passing timedelta64, raise for PeriodDtype
# NB: this must come after unit transformation
orig_arg = arg
arg, _ = maybe_convert_dtype(arg, copy=False)
arg = ensure_object(arg)
require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if format is not None:
# There is a special fast-path for iso8601 formatted
# datetime strings, so in those cases don't use the inferred
        # format because using it would make processing slower in this
# special case
format_is_iso8601 = _format_is_iso(format)
if format_is_iso8601:
require_iso8601 = not infer_datetime_format
format = None
tz_parsed = None
result = None
if format is not None:
try:
# shortcut formatting here
if format == '%Y%m%d':
try:
# pass orig_arg as float-dtype may have been converted to
# datetime64[ns]
orig_arg = ensure_object(orig_arg)
result = _attempt_YYYYMMDD(orig_arg, errors=errors)
except (ValueError, TypeError, tslibs.OutOfBoundsDatetime):
raise ValueError("cannot convert the input to "
"'%Y%m%d' date format")
# fallback
if result is None:
try:
result, timezones = array_strptime(
arg, format, exact=exact, errors=errors)
if '%Z' in format or '%z' in format:
return _return_parsed_timezone_results(
result, timezones, box, tz, name)
except tslibs.OutOfBoundsDatetime:
if errors == 'raise':
raise
elif errors == 'coerce':
result = np.empty(arg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult.fill(tslibs.iNaT)
else:
result = arg
except ValueError:
# if format was inferred, try falling back
# to array_to_datetime - terminate here
# for specified formats
if not infer_datetime_format:
if errors == 'raise':
raise
elif errors == 'coerce':
result = np.empty(arg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult.fill(tslibs.iNaT)
else:
result = arg
except ValueError as e:
# Fallback to try to convert datetime objects if timezone-aware
# datetime objects are found without passing `utc=True`
try:
values, tz = conversion.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, name=name, tz=tz)
except (ValueError, TypeError):
raise e
if result is None:
assert format is None or infer_datetime_format
utc = tz == 'utc'
result, tz_parsed = objects_to_datetime64ns(
arg, dayfirst=dayfirst, yearfirst=yearfirst,
utc=utc, errors=errors, require_iso8601=require_iso8601,
allow_object=True)
if tz_parsed is not None:
if box:
# We can take a shortcut since the datetime64 numpy array
# is in UTC
return DatetimeIndex._simple_new(result, name=name,
tz=tz_parsed)
else:
# Convert the datetime64 numpy array to an numpy array
# of datetime objects
result = [Timestamp(ts, tz=tz_parsed).to_pydatetime()
for ts in result]
return np.array(result, dtype=object)
if box:
# Ensure we return an Index in all cases where box=True
if is_datetime64_dtype(result):
return DatetimeIndex(result, tz=tz, name=name)
elif is_object_dtype(result):
# e.g. an Index of datetime objects
from pandas import Index
return Index(result, name=name)
return result
def _adjust_to_origin(arg, origin, unit):
"""
Helper function for to_datetime.
Adjust input argument to the specified origin
Parameters
----------
arg : list, tuple, ndarray, Series, Index
date to be adjusted
origin : 'julian' or Timestamp
origin offset for the arg
unit : string
passed unit from to_datetime, must be 'D'
Returns
-------
ndarray or scalar of adjusted date(s)
"""
if origin == 'julian':
original = arg
j0 = Timestamp(0).to_julian_date()
if unit != 'D':
raise ValueError("unit must be 'D' for origin='julian'")
try:
arg = arg - j0
except TypeError:
raise ValueError("incompatible 'arg' type for given "
"'origin'='julian'")
        # preemptively check this for a nice range
j_max = Timestamp.max.to_julian_date() - j0
j_min = Timestamp.min.to_julian_date() - j0
if np.any(arg > j_max) or np.any(arg < j_min):
raise tslibs.OutOfBoundsDatetime(
"{original} is Out of Bounds for "
"origin='julian'".format(original=original))
else:
# arg must be numeric
if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or
is_numeric_dtype(np.asarray(arg))):
raise ValueError(
"'{arg}' is not compatible with origin='{origin}'; "
"it must be numeric with a unit specified ".format(
arg=arg,
origin=origin))
# we are going to offset back to unix / epoch time
try:
offset = Timestamp(origin)
except tslibs.OutOfBoundsDatetime:
raise tslibs.OutOfBoundsDatetime(
"origin {origin} is Out of Bounds".format(origin=origin))
except ValueError:
raise ValueError("origin {origin} cannot be converted "
"to a Timestamp".format(origin=origin))
if offset.tz is not None:
raise ValueError(
"origin offset {} must be tz-naive".format(offset))
offset -= Timestamp(0)
# convert the offset to the unit of the arg
# this should be lossless in terms of precision
offset = offset // tslibs.Timedelta(1, unit=unit)
# scalars & ndarray-like can handle the addition
if is_list_like(arg) and not isinstance(
arg, (ABCSeries, ABCIndexClass, np.ndarray)):
arg = np.asarray(arg)
arg = arg + offset
return arg
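# Editor's note (not part of the original pandas source): the origin handling
# above is exercised through the public `pandas.to_datetime` API, e.g. with a
# Julian-day origin (for which unit must be 'D'):
#
#     >>> import pandas as pd
#     >>> pd.to_datetime(2451545, unit='D', origin='julian')
#     Timestamp('2000-01-01 12:00:00')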
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
utc=None, box=True, format=None, exact=True,
unit=None, infer_datetime_format=False, origin='unix',
cache=False):
"""
Convert argument to datetime.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
.. versionadded:: 0.18.1
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
dayfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
If True, parses dates with the day first, eg 10/11/12 is parsed as
2012-11-10.
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug, based on dateutil behavior).
yearfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
- If True parses dates with the year first, eg 10/11/12 is parsed as
2010-11-12.
        - If both dayfirst and yearfirst are True, yearfirst takes precedence
          (same as dateutil).
Warning: yearfirst=True is not strict, but will prefer to parse
with year first (this is a known bug, based on dateutil behavior).
.. versionadded:: 0.16.1
utc : boolean, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
box : boolean, default True
- If True returns a DatetimeIndex or Index-like object
- If False returns ndarray of values.
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
exact : boolean, True by default
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
unit : string, default 'ns'
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
origin : scalar, default is 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If 'unix' (or POSIX) time; origin is set to 1970-01-01.
- If 'julian', unit must be 'D', and origin is set to beginning of
Julian Calendar. Julian day number 0 is assigned to the day starting
at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
.. versionadded:: 0.20.0
cache : boolean, default False
If True, use a cache of unique, converted dates to apply the datetime
conversion. May produce significant speed-up when parsing duplicate
date strings, especially ones with timezone offsets.
.. versionadded:: 0.23.0
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In case when it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
return will have datetime.datetime type (or corresponding
array/Series).
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_timedelta : Convert argument to timedelta.
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
    'ms', 'us', 'ns'] or plurals of the same
>>> df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
    Passing infer_datetime_format=True can often speed up parsing
    if the format is not exactly ISO8601, but is still a regular format.
>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> %timeit pd.to_datetime(s,infer_datetime_format=True)
100 loops, best of 3: 10.4 ms per loop
>>> %timeit pd.to_datetime(s,infer_datetime_format=False)
1 loop, best of 3: 471 ms per loop
Using a unix epoch time
>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
.. warning:: For float arg, precision rounding might happen. To prevent
unexpected behavior use a fixed-width exact type.
Using a non-unix epoch origin
>>> pd.to_datetime([1, 2, 3], unit='D',
origin=pd.Timestamp('1960-01-01'))
0 1960-01-02
1 1960-01-03
2 1960-01-04
"""
if arg is None:
return None
if origin != 'unix':
arg = _adjust_to_origin(arg, origin, unit)
tz = 'utc' if utc else None
convert_listlike = partial(_convert_listlike_datetimes, tz=tz, unit=unit,
dayfirst=dayfirst, yearfirst=yearfirst,
errors=errors, exact=exact,
infer_datetime_format=infer_datetime_format)
if isinstance(arg, Timestamp):
result = arg
if tz is not None:
if arg.tz is not None:
result = result.tz_convert(tz)
else:
result = result.tz_localize(tz)
elif isinstance(arg, ABCSeries):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = arg.map(cache_array)
else:
values = convert_listlike(arg._values, True, format)
result = arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, compat.MutableMapping)):
result = _assemble_from_unit_mappings(arg, errors, box, tz)
elif isinstance(arg, ABCIndexClass):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, box, errors,
name=arg.name)
else:
convert_listlike = partial(convert_listlike, name=arg.name)
result = convert_listlike(arg, box, format)
elif is_list_like(arg):
cache_array = _maybe_cache(arg, format, cache, convert_listlike)
if not cache_array.empty:
result = _convert_and_box_cache(arg, cache_array, box, errors)
else:
result = convert_listlike(arg, box, format)
else:
result = convert_listlike(np.array([arg]), box, format)[0]
return result
# mappings for assembling units
_unit_map = {'year': 'year',
'years': 'year',
'month': 'month',
'months': 'month',
'day': 'day',
'days': 'day',
'hour': 'h',
'hours': 'h',
'minute': 'm',
'minutes': 'm',
'second': 's',
'seconds': 's',
'ms': 'ms',
'millisecond': 'ms',
'milliseconds': 'ms',
'us': 'us',
'microsecond': 'us',
'microseconds': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'nanoseconds': 'ns'
}
def _assemble_from_unit_mappings(arg, errors, box, tz):
"""
assemble the unit specified fields from the arg (DataFrame)
Return a Series for actual parsing
Parameters
----------
arg : DataFrame
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
box : boolean
- If True, return a DatetimeIndex
- If False, return an array
tz : None or 'utc'
Returns
-------
Series
"""
from pandas import to_timedelta, to_numeric, DataFrame
arg = DataFrame(arg)
if not arg.columns.is_unique:
raise ValueError("cannot assemble with duplicate keys")
# replace passed unit with _unit_map
def f(value):
if value in _unit_map:
return _unit_map[value]
# m is case significant
if value.lower() in _unit_map:
return _unit_map[value.lower()]
return value
unit = {k: f(k) for k in arg.keys()}
unit_rev = {v: k for k, v in unit.items()}
# we require at least Ymd
required = ['year', 'month', 'day']
req = sorted(list(set(required) - set(unit_rev.keys())))
if len(req):
raise ValueError("to assemble mappings requires at least that "
"[year, month, day] be specified: [{required}] "
"is missing".format(required=','.join(req)))
# keys we don't recognize
excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
if len(excess):
raise ValueError("extra keys have been passed "
"to the datetime assemblage: "
"[{excess}]".format(excess=','.join(excess)))
def coerce(values):
# we allow coercion to if errors allows
values = to_numeric(values, errors=errors)
# prevent overflow in case of int8 or int16
if is_integer_dtype(values):
values = values.astype('int64', copy=False)
return values
values = (coerce(arg[unit_rev['year']]) * 10000 +
coerce(arg[unit_rev['month']]) * 100 +
coerce(arg[unit_rev['day']]))
try:
values = to_datetime(values, format='%Y%m%d', errors=errors, utc=tz)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the "
"datetimes: {error}".format(error=e))
for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
value = unit_rev.get(u)
if value is not None and value in arg:
try:
values += to_timedelta(coerce(arg[value]),
unit=u,
errors=errors)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the datetimes [{value}]: "
"{error}".format(value=value, error=e))
if not box:
return values.values
return values
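# A minimal usage sketch, assuming a DataFrame with 'year'/'month'/'day' (and an
# optional 'hour') column; ``pd.to_datetime(df)`` delegates to this helper:
#
#     >>> df = pd.DataFrame({'year': [2015, 2016], 'month': [2, 3],
#     ...                    'day': [4, 5], 'hour': [10, 11]})
#     >>> pd.to_datetime(df)
#     0   2015-02-04 10:00:00
#     1   2016-03-05 11:00:00
#     dtype: datetime64[ns]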
def _attempt_YYYYMMDD(arg, errors):
"""
try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
    arg is passed in as an object dtype, but could really be ints/strings
with nan-like/or floats (e.g. with nan)
Parameters
----------
arg : passed value
errors : 'raise','ignore','coerce'
"""
def calc(carg):
# calculate the actual result
carg = carg.astype(object)
parsed = parsing.try_parse_year_month_day(carg / 10000,
carg / 100 % 100,
carg % 100)
return tslib.array_to_datetime(parsed, errors=errors)[0]
def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult[~mask] = tslibs.iNaT
masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
result[mask] = masked_result.astype('M8[ns]')
return result
# try intlike / strings that are ints
try:
return calc(arg.astype(np.int64))
except ValueError:
pass
# a float with actual np.nan
try:
carg = arg.astype(np.float64)
return calc_with_mask(carg, notna(carg))
except ValueError:
pass
# string with NaN-like
try:
mask = ~algorithms.isin(arg, list(tslib.nat_strings))
return calc_with_mask(arg, mask)
except ValueError:
pass
return None
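# A minimal sketch of what _attempt_YYYYMMDD handles (inputs assumed for
# illustration only):
#     _attempt_YYYYMMDD(np.array([20150204, 20160305], dtype=object),
#                       errors='raise')   # -> datetime64[ns] array
#     _attempt_YYYYMMDD(np.array(['not-a-date'], dtype=object),
#                       errors='raise')   # -> None, caller falls back to generic parsing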
# Fixed time formats for time parsing
_time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"]
def _guess_time_format_for_array(arr):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notna(arr).nonzero()[0]
if len(non_nan_elements):
element = arr[non_nan_elements[0]]
for time_format in _time_formats:
try:
datetime.strptime(element, time_format)
return time_format
except ValueError:
pass
return None
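# For example (values assumed for illustration):
#     _guess_time_format_for_array(np.array(['14:30', '09:15']))  # -> '%H:%M'
#     _guess_time_format_for_array(np.array([np.nan]))            # -> None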
def to_time(arg, format=None, infer_time_format=False, errors='raise'):
"""
Parse time strings to time objects using fixed strptime formats ("%H:%M",
"%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p")
Use infer_time_format if all the strings are in the same format to speed
up conversion.
Parameters
----------
arg : string in time format, datetime.time, list, tuple, 1-d array, Series
format : str, default None
Format used to convert arg into a time object. If None, fixed formats
are used.
infer_time_format: bool, default False
Infer the time format based on the first non-NaN element. If all
strings are in the same format, this will speed up conversion.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as None
- If 'ignore', then invalid parsing will return the input
Returns
-------
datetime.time
"""
def _convert_listlike(arg, format):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a string, datetime, list, tuple, '
'1-d array, or Series')
arg = ensure_object(arg)
if infer_time_format and format is None:
format = _guess_time_format_for_array(arg)
times = []
if format is not None:
for element in arg:
try:
times.append(datetime.strptime(element, format).time())
except (ValueError, TypeError):
if errors == 'raise':
msg = ("Cannot convert {element} to a time with given "
"format {format}").format(element=element,
format=format)
raise ValueError(msg)
elif errors == 'ignore':
return arg
else:
times.append(None)
else:
formats = _time_formats[:]
format_found = False
for element in arg:
time_object = None
for time_format in formats:
try:
time_object = datetime.strptime(element,
time_format).time()
if not format_found:
# Put the found format in front
fmt = formats.pop(formats.index(time_format))
formats.insert(0, fmt)
format_found = True
break
except (ValueError, TypeError):
continue
if time_object is not None:
times.append(time_object)
elif errors == 'raise':
raise ValueError("Cannot convert arg {arg} to "
"a time".format(arg=arg))
elif errors == 'ignore':
return arg
else:
times.append(None)
return times
if arg is None:
return arg
elif isinstance(arg, time):
return arg
elif isinstance(arg, ABCSeries):
values = _convert_listlike(arg._values, format)
return arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, format)
elif is_list_like(arg):
return _convert_listlike(arg, format)
return _convert_listlike(np.array([arg]), format)[0]
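# A minimal usage sketch (values assumed for illustration):
#     >>> to_time('14:30')
#     datetime.time(14, 30)
#     >>> to_time(['14:30:15', '9:15AM'])
#     [datetime.time(14, 30, 15), datetime.time(9, 15)]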
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
hecate-xw/Miscellaneous | TsinghuaCSLT/audioEmbedded/Mission/stable/addInfoToWav.py | 1 | 8504 | #!usr/bin/env python
#coding=utf-8
import wave
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
import sys
import math
import disposeWav
import MCLT
sys.setrecursionlimit(1000000) # manually set the recursion depth limit
def getInfoOfBytes(strInfo): # convert a string into the binary representation of its ASCII codes
ascii = map(ord,strInfo)
bytes = ''
for byte in ascii:
        suffix_zero = 8-len(bin(byte))+2 # number of leading zeros to pad
        bytes += (suffix_zero*'0' + bin(byte)[2:]) # add the leading zeros to form an 8-bit binary string
# bytes = struct.pack('%ds'%len(strInfo),strInfo)
return bytes
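# Quick sanity check (illustrative): ord('A') == 65 == 0b1000001, so after
# zero-padding to 8 bits
#     getInfoOfBytes("A")   # -> '01000001'
#     getInfoOfBytes("AB")  # -> '0100000101000010'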
def setInfoWithLSB(audio,bytes): # embed data into the audio with the LSB method; both channels carry the same information symmetrically
synchronization = "00111100001111000011110000111100"
bytes = synchronization+bytes
for i in range(len(bytes)):
if (audio[0][i]%2 == 0 and int(bytes[i]) == 1) or (audio[0][i]%2 == 1 and int(bytes[i]) == 0):
audio[0][i] += 1
if (audio[1][i]%2 == 0 and int(bytes[i]) == 1) or (audio[1][i]%2 == 1 and int(bytes[i]) == 0):
audio[1][i] += 1
return audio
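# Illustrative decode sketch for the LSB scheme above (an assumed helper, not
# part of the original module): after embedding, bit i of the payload is the
# parity of sample i of either channel, offset by the 32-bit synchronization prefix.
def readInfoWithLSB(channel, nbits, sync_len=32):
    return ''.join(str(int(sample) % 2) for sample in channel[sync_len:sync_len + nbits])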
def setInfoWithMCLT(audio,bytes): # embed data into the audio with the MCLT method
"""
    Use this function to embed string-formatted information into audio data.
    Args:
        audio: A 2*N list; it is the carrier that carries the information.
        bytes: A string that stores the information you want to embed.
    Return:
        A list with the same shape as audio, but with the information embedded in it.
"""
BlockLen = 4096
synchronization = "001111000011110000111100001111000011110000111100"
bytes = synchronization + bytes
    L = 6 # each bit is spread over L frequencies
    s = [-1,1,-1,1,-1,1] # sign pattern corresponding to an embedded bit
    # transform the left-channel data into the complex domain via MCLT
    B = len(audio[0])*2 / BlockLen - 1 # number of blocks
#######################################
    # the embedding procedure follows
for i in range(B-1):
if i % 2 == 0: #Every Other Block
continue
X_prev = MCLT.FastMCLT(audio[0][(i-1)*BlockLen/2:(i+1)*BlockLen/2])
X_curr = MCLT.FastMCLT(audio[0][i*BlockLen/2:(i+2)*BlockLen/2])
X_next = MCLT.FastMCLT(audio[0][(i+1)*BlockLen/2:(i+3)*BlockLen/2])
#X = MCLT.FastMCLT(audio[0][i*BlockLen:(i+1)*BlockLen])
X = X_curr
for k in range( len(bytes) ):
#Calculate z1 and z2
z1 = []
z2 = []
for l in range(BlockLen/2):
if abs(l-k) < 2*L and abs(l-k) % 2 == 0:
temp = pow(-1,l)/( 2.0*math.pi*(l-k-1)*(l-k+1) )
z1.append(temp)
z2.append(temp)
elif abs(l-k) == 1:
temp = pow(-1,l)/8.0
z1.append(temp)
z2.append(-temp)
else:
temp = 0
z1.append(0)
z2.append(0)
###
for m in range(L):
if bytes[k] == '1':
X[(2*k+1)*L+m] = abs(X[(2*k+1)*L+m])*s[m] #2*k --> Every Other Frequency
else:
X[(2*k+1)*L+m] = -abs(X[(2*k+1)*L+m])*s[m]
#The following is for compensating the interference
for m in range(L):
                if k < len(synchronization): # the sync sequence is embedded differently, to prevent interference
temp = np.inner(z1,X_prev)+np.inner(z2,X_next)+1.0/4.0*X[(2*k+1)*L+m-1]-1.0/4.0*X[(2*k+1)*L+m+1]
X[(2*k+1)*L+m] = X[(2*k+1)*L+m] - 2j*temp
#######################################
        y = MCLT.FastIMCLT(X) # inverse-transform the embedded complex-domain data back into the real domain
y_prev = MCLT.FastIMCLT(X_prev).tolist()[BlockLen/2:]
        #y_prev = audio[0][i*BlockLen/2:(i+1)*BlockLen/2] # the IMCLT output differs from the original sequence, so this cannot be used
y_next = MCLT.FastIMCLT(X_next).tolist()[:BlockLen/2]
        #y_next = audio[0][(i+1)*BlockLen/2:(i+2)*BlockLen/2] # the IMCLT output differs from the original sequence, so this cannot be used
y = np.array(y_prev + y_next) + y
audio[0][i*BlockLen/2:(i+2)*BlockLen/2] = y
'''
    # transform the right-channel data into the complex domain via MCLT
    B = len(audio[1])*2 / BlockLen - 1 # number of blocks
#######################################
    # the embedding procedure follows
for i in range(B-1):
if i % 2 == 0: #Every Other Block
continue
X_prev = MCLT.FastMCLT(audio[1][(i-1)*BlockLen/2:(i+1)*BlockLen/2])
X_curr = MCLT.FastMCLT(audio[1][(i)*BlockLen/2:(i+2)*BlockLen/2])
X_next = MCLT.FastMCLT(audio[1][(i+1)*BlockLen/2:(i+3)*BlockLen/2])
#X = MCLT.FastMCLT(audio[0][i*BlockLen:(i+1)*BlockLen])
X = X_curr
for k in range( len(bytes) ):
#Calculate z1 and z2
z1 = []
z2 = []
for l in range(BlockLen/2):
if abs(l-k) < 2*L and abs(l-k) % 2 == 0:
temp = pow(-1,l)/( 2.0*math.pi*(l-k-1)*(l-k+1) )
z1.append(temp)
z2.append(temp)
elif abs(l-k) == 1:
temp = pow(-1,l)/8.0
z1.append(temp)
z2.append(-temp)
else:
temp = 0
z1.append(0)
z2.append(0)
###
for m in range(L):
if bytes[k] == '1':
X[(2*k+1)*L+m] = abs(X[(2*k+1)*L+m])*s[m] #2*k --> Every Other Frequency
else:
X[(2*k+1)*L+m] = -abs(X[(2*k+1)*L+m])*s[m]
#The following is for compensating the interference
for m in range(L):
temp = np.inner(z1,X_prev)+np.inner(z2,X_next)+1.0/4.0*X[(2*k+1)*L+m-1]-1.0/4.0*X[(2*k+1)*L+m+1]
X[(2*k+1)*L+m] = X[(2*k+1)*L+m] - 2j*temp
#######################################
        y = MCLT.FastIMCLT(X) # inverse-transform the embedded complex-domain data back into the real domain
y_prev = MCLT.FastIMCLT(X_prev).tolist()[BlockLen/2:]
y_next = MCLT.FastIMCLT(X_next).tolist()[:BlockLen/2]
y = np.array(y_prev + y_next) + y
audio[1][i*BlockLen/2:(i+2)*BlockLen/2] = y
'''
return audio
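# Reading a bit back from the MCLT scheme above mirrors the embedding (hedged
# note): take the MCLT of every other block and compare the signs of the
# coefficients (2*k+1)*L ... (2*k+1)*L+L-1 against the pattern s.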
def setInfoWithFFT(audio, bytes): # transform to the frequency domain with an FFT and embed the information there
BlockLen = 4096
B = len(audio[0]) / BlockLen
synchronization = "00111100001111000011110000111100"
bytes = synchronization + bytes
for i in range(B):
FL = np.fft.rfft( audio[0][i*BlockLen:(i+1)*BlockLen] )
FR = np.fft.rfft( audio[1][i*BlockLen:(i+1)*BlockLen] )
for k in range( len(bytes) ):
if bytes[k] == '1':
FL[100+k] = abs(FL[100+k])
FR[100+k] = abs(FR[100+k])
else:
FL[100+k] = -abs(FL[100+k])
FR[100+k] = -abs(FR[100+k])
outputLeft = np.fft.irfft(FL)
outputRight = np.fft.irfft(FR)
audio[0][i*BlockLen:(i+1)*BlockLen] = outputLeft
audio[1][i*BlockLen:(i+1)*BlockLen] = outputRight
return audio
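# Illustrative decode sketch for the FFT scheme above (an assumed helper, not
# part of the original module): each embedded bit is recovered from the sign of
# the corresponding rFFT bin of a 4096-sample block.
def readInfoWithFFT(channel, nbits, BlockLen=4096, offset=100):
    F = np.fft.rfft(channel[:BlockLen])
    return ''.join('1' if F[offset + k].real >= 0 else '0' for k in range(nbits))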
def test():
strInfo = "NanaliCCC"
bytes = getInfoOfBytes(strInfo)
#print bytes
nchannels, sampwidth, framerate, nframes, wave_data, time = disposeWav.read_wave_data("../wavFile/bird.wav")
wave_data = setInfoWithLSB(wave_data, bytes)
params = (nchannels, sampwidth, framerate, nframes,'NONE', 'not compressed')
disposeWav.write_wave("../wavFile/result1.wav",params,wave_data)
nchannels, sampwidth, framerate, nframes, wave_data, time = disposeWav.read_wave_data("../wavFile/bird.wav")
wave_data = setInfoWithMCLT(wave_data,bytes)
params = (nchannels, sampwidth, framerate, nframes,'NONE', 'not compressed')
disposeWav.write_wave("../wavFile/result2.wav",params,wave_data)
nchannels, sampwidth, framerate, nframes, wave_data, time = disposeWav.read_wave_data("../wavFile/bird.wav")
wave_data = setInfoWithFFT(wave_data,bytes)
params = (nchannels, sampwidth, framerate, nframes,'NONE', 'not compressed')
disposeWav.write_wave("../wavFile/result3.wav",params,wave_data)
if __name__ == "__main__":
#print setInfoWithMCLT.__doc__
test()
| mit |
chavdim/amazon_comments | analisys/nn.py | 1 | 1052 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 13 15:45:34 2016
@author: chavdar
"""
import csv
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import normalize
d = str(30)
with open('train_top'+d+'.csv', 'r',encoding='utf8') as f:
my_list = []
reader = csv.reader(f)
for row in reader:
my_list.append(row)
data = np.array(my_list)
data = data[1:,] # remove description
data = data.astype(np.float)
data = normalize(data, axis=0, norm='l2')
#norm age and rating
#data[0:,-2] = data[0:,-2] / data[0:,-2].max()
#data[0:,-1] = data[0:,-1] / data[0:,-1].max()
#data_word_age = data[0:,0:-1]
train_x = data[0:,0:-1]
train_y = np.array(data[0:,-1:]).reshape((data.shape[0], ))
X_train, X_test, y_train, y_test = train_test_split(train_x, train_y, test_size=0.3, random_state=0)
reg = MLPRegressor(hidden_layer_sizes=(50,))
reg.fit(X_train,y_train)
p=reg.predict(X_test)
s = reg.score(X_test,y_test)
print(s) | mit |
pythonvietnam/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
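# For instance (values assumed for illustration), columns that differ only by a
# global sign still compare equal:
#     A = np.array([[1., 2.], [3., 4.]])
#     _check_with_col_sign_flipping(A, A * np.array([1., -1.]))   # -> True
#     _check_with_col_sign_flipping(A, A + 1., tol=1e-3)          # -> False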
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
glennq/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
svviz/svviz | tests/runTests.py | 1 | 4643 | import datetime
import json
import os
import pandas
import subprocess
import sys
import time
import traceback
from svviz import demo
from svviz import testIssues
from svviz import rendertest
from svviz import testDemos
from svviz import testCounts
# USAGE = """
# python runTests.py run|reset [hg19.ref.fa]
# run - run all svviz tests
# reset - removes then regenerates all values stored for each test;
# use only after verifying that svviz behavior has changed
# and that the new behavior is correct
# hg19.ref.fa - path to reference genome; must be defined here or using
# the environmental variable SVVIZ_HG19_FASTA
# """
# reset ...
def reset():
print("resetting test values...")
previousSummaryPath = "countsTest.previousSummary.txt"
os.remove(previousSummaryPath)
raise Exception("not yet implemented: reset for demos and for render test")
# run ...
def _runTest(fn, description):
print("\n\n -- running {} --\n\n".format(description))
try:
t0 = time.time()
result = fn()
t1 = time.time()
result = [result[0], result[1], t1-t0]
except Exception as e:
print(" ** error running {}: {} **".format(description, e))
print(traceback.print_exc())
result = [False, str(e), -1]
return result
def getHG19Ref(path=None):
if path is not None:
os.environ["SVVIZ_HG19_FASTA"] = path
return path
path = os.environ.get("SVVIZ_HG19_FASTA", None)
assert os.path.exists(path), "Can't find hg19 reference fasta at path '{}'".format(path)
return path
def getCountsData():
path = "svviz-examples/countsTest"
if not os.path.exists(path):
result = demo.downloadDemo("countsTest")
if not result:
raise Exception("Couldn't download the countsTest data.")
def runTestCounts():
getCountsData()
genome = getHG19Ref()
vcfs = ["svviz-examples/countsTest/na12878_test_deletions.vcf"]
bams = ["svviz-examples/countsTest/reads.sorted.bam"]
previousSummaryPath = "countsTest.previousSummary.txt"
return testCounts.run(genome, vcfs, bams, previousSummaryPath)
def runTestIssues():
genome = getHG19Ref()
return testIssues.run(genome)
def saveTimingInfo(summary):
timingsPath = "test_timings.csv"
git_version = subprocess.check_output(["git", "describe"]).strip()
new_row = summary[["timing"]].T
new_row["date"] = [datetime.datetime.now()]
new_row["version"] = git_version
if os.path.exists(timingsPath):
timings = pandas.read_csv(timingsPath, index_col=0)
timings = pandas.concat([timings, new_row])
else:
timings = new_row
timings.to_csv(timingsPath)
print(timings)
def run(which):
print("running all tests...")
summary = pandas.DataFrame(columns=["pass", "info", "timing"])
# Test chromosome ends
if len(which)==0 or "chrom_ends" in which:
summary.loc["chrom_ends"] = _runTest(runTestIssues, "issues")
# Run the demos
if len(which)==0 or "demos" in which:
summary.loc["demos"] = _runTest(testDemos.run, "demos")
# Run regression testing on ref/alt/amb counts
if len(which)==0 or "counts" in which:
summary.loc["counts"] = _runTest(runTestCounts, "counts")
# Run the render regression tests
if len(which)==0 or "rendering" in which:
summary.loc["rendering"] = _runTest(rendertest.run, "rendering")
summary["timing"] = summary["timing"].apply(lambda x: "{}".format(datetime.timedelta(seconds=int(x))))
print(summary)
saveTimingInfo(summary)
def main():
# don't ask me why I rolled my own regression testing code instead of using one of the
# gazillion existing frameworks...
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--reference", help="path for hg19 reference fasta; must be defined here or "
"using the environmental variable SVVIZ_HG19_FASTA")
parser.add_argument("mode", help="run|reset")
parser.add_argument("which", nargs="*", help="which analyses to run (default all)")
args = parser.parse_args()
print(args.which)
# if len(sys.argv) < 2:
# print USAGE
# return
if args.mode == "run":
if getHG19Ref(args.reference) is None:
parser.print_help()
print("ERROR: Must provide path for hg19 reference fasta")
sys.exit(1)
run(args.which)
elif args.mode == "reset":
reset()
else:
parser.print_help()
if __name__ == '__main__':
main() | mit |
alexis-roche/nipy | examples/labs/need_data/plot_registration.py | 4 | 1068 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
Example of plotting a registration checker with nipy.labs visualization tools
The idea is to represent the anatomical image to be checked with an overlay of
the edges of the reference image. This idea is borrowed from FSL.
Needs the *templates* data package.
Needs matplotlib.
"""
print(__doc__)
try:
import matplotlib.pyplot as plt
except ImportError:
raise RuntimeError("This script needs the matplotlib library")
from nipy.labs import viz
from nipy.labs.viz_tools import anat_cache
# Get the data. Here we are using the reference T1 image
anat, affine, _ = anat_cache._AnatCache.get_anat()
# Here we use the same image as a reference. As a result it is perfectly
# aligned.
reference = anat
reference_affine = affine
slicer = viz.plot_anat(anat, affine, dim=.2, black_bg=True)
slicer.edge_map(reference, reference_affine)
plt.show()
| bsd-3-clause |
mathmare/TheoreticalMinimum | problems/Textbook - Doing Math With Python/temperature_variation.py | 2 | 1159 |
"""
Temperature Variation
This is my solution to the "Temperature Variation" problem. [1] Here, we are tasked to "find the
temperature at different points of the day" and "create a graph with the time on the x-axis and
the corresponding temperature on the y-axis." of two cities of our choice.
[1] Saha, A. (2015). Doing Math with Python: Use Programming to Explore Algebra, Statistics,
Calculus, and More! San Francisco: No Starch Press (Ch. 2, Challenge #1).
"""
import sys, datetime
import numpy as np
import matplotlib.pyplot as plt
new_york_temp_sample = [65, 66, 65, 65, 66, 65, 65, 65, 65, 64, 65, 65, 65, 65, 67, 69, 71, 74, 76, 78, 79, 79, 78, 79, 79, 78, 76]
las_vegas_temp_sample = [78, 77, 76, 75, 74, 72, 71, 71, 70, 69, 68, 67, 67, 69, 71, 72, 74, 75, 76, 77, 77, 77, 77, 75, 71, 68, 70]
if __name__ == '__main__':
plt.figure()
years = range(2001, 2028) # cos' we don't want to be off-by-one
plt.plot(years, new_york_temp_sample, color='blue')
plt.plot(years, las_vegas_temp_sample, color='green')
plt.xlabel('Time Series')
plt.ylabel('Temperature')
plt.xlim(2000, 2027)
plt.title('Matplotlib Time Series Example')
plt.show()
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/api/histogram_path_demo.py | 6 | 1464 | """
This example shows how to use a path patch to draw a bunch of
rectangles. The technique of using lots of Rectangle instances, or
the faster method of using PolyCollections, was implemented before we
had proper paths with moveto/lineto, closepoly etc. in mpl. Now that
we have them, we can draw collections of regularly shaped objects with
homogeneous properties more efficiently with a PathCollection. This
example makes a histogram -- it's more work to set up the vertex arrays
at the outset, but it should be much faster for large numbers of
objects.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
fig = plt.figure()
ax = fig.add_subplot(111)
# histogram our data with numpy
data = np.random.randn(1000)
n, bins = np.histogram(data, 50)
# get the corners of the rectangles for the histogram
left = np.array(bins[:-1])
right = np.array(bins[1:])
bottom = np.zeros(len(left))
top = bottom + n
# we need a (numrects x numsides x 2) numpy array for the path helper
# function to build a compound path
XY = np.array([[left,left,right,right], [bottom,top,top,bottom]]).T
# get the Path object
barpath = path.Path.make_compound_path_from_polys(XY)
# make a patch out of it
patch = patches.PathPatch(barpath, facecolor='blue', edgecolor='gray', alpha=0.8)
ax.add_patch(patch)
# update the view limits
ax.set_xlim(left[0], right[-1])
ax.set_ylim(bottom.min(), top.max())
plt.show()
| gpl-2.0 |
JosmanPS/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 130 | 22974 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
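# Note (informal, standard identity behind the test above): with K = X.dot(X.T),
# the dual form
#     coef = X.T.dot(inv(K + alpha * I_n)).dot(y)
# equals the primal ridge solution
#     coef = inv(X.T.dot(X) + alpha * I_p).dot(X.T).dot(y),
# which is what the assertion checks numerically.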
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
# we give a small weights to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
rdussurget/py-altiwaves | bin/test_spectral_analysis.py | 1 | 3396 | # -*- coding: utf-8 -*-
'''
TEST_SPECTRAL_ANALYSIS
@summary: This is a testing script, which applies the along-track wavelet transform <br />
to a simulated red noise data set and shows the results.
@note: The output graph should show two hovmöllers of the scale-averaged spectrum and<br />
simulated sea level, with detected features marked as black circles.
@author: Renaud DUSSURGET, LER/PAC IFREMER.
@since: Created in November 2012 by RD.
@copyright: Renaud Dussurget 2012.
@license: GNU Lesser General Public License
This file is part of PyAltiWAVES.
PyAltiWAVES is free software: you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option)
any later version.
PyAltiWAVES is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
for more details.
You should have received a copy of the GNU Lesser General Public License along
with PyAltiWAVES. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
import kernel as ke
import altimetry.tools as AT
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
if __name__ == "__main__" :
#Simulate sea level data from red noise
#######################################
#Position data set in space and time
lonlatres=0.001 #Increase this factor if you want faster computation time (lesser resolution)
nt=50 #Time steps : decrease it to have faster computations
dj_factor=20 #Scale factor wrt. number of elements : increase it to get faster computation
lat = np.arange(43.0,44.0,lonlatres*2)
lon = np.arange(6,6.5,lonlatres)
dst=AT.calcul_distance(lat,lon)
dx=np.median(AT.deriv(dst))
N=len(lon)
dt=9.9
time=22705.0 + np.arange(0,nt)*dt
#Red noise generation (lengthscale > 10 km)
#sla=np.cumsum(np.random.randn(N*nt)).reshape((nt,N))
sla=np.ma.array(np.cumsum(np.cumsum(np.random.randn(nt,N),axis=1),axis=0),mask=np.zeros((nt,N),dtype=bool))
#Filter small scales
for i in np.arange(nt):
sla[i,:]=AT.loess(sla[i,:], dst, 10.)
#Run wavelet analysis
#####################
#WV periodogram analysis
perWV = ke.periodogram_analysis(dx, sla.transpose(),res_factor=dj_factor,average=True)
perpsdWV = perWV['psd']
pWV = perWV['p']
DWV = perWV['D']
gx,gy=np.meshgrid(DWV,pWV)
#3D periodogram
dum=np.ma.array(np.log10(perpsdWV),mask=(np.log10(perpsdWV) < -6))
dum.data[dum.mask]=-2
fig = plt.figure()
ax = Axes3D(fig)
ax.view_init(68, -30)
surf = ax.plot_surface(gx, np.log(gy), dum,vmin=-6,vmax=3,cmap=cm.jet,linewidth=0, antialiased=True,shade=True)#, rstride=1, cstride=1, cmap=cm.jet, extend3d=True)
ax.set_zlim([-6,3])
ax.set_xlabel('Along-Track Distance(km)')
ax.set_ylabel('log10(Spatial scale - m)')
ax.set_zlabel('log10(Power Spectral Density - cm2)')
plt.show()
print 'done' | lgpl-3.0 |
anntzer/mplcursors | doc/source/conf.py | 1 | 2931 | import os
import re
import sys
import mplcursors
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_gallery.gen_gallery',
]
needs_extensions = {'sphinx_gallery.gen_gallery': '0.6.0'}
source_suffix = '.rst'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
master_doc = 'index'
project = 'mplcursors'
copyright = '2016–present, Antony Lee'
author = 'Antony Lee'
# RTD modifies conf.py, making setuptools_scm mark the version as -dirty.
version = release = re.sub(r'\.dirty$', '', mplcursors.__version__)
language = 'en'
default_role = 'any'
pygments_style = 'sphinx'
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_theme_options = {
'description': 'Interactive data selection cursors for Matplotlib.',
'github_user': 'anntzer',
'github_repo': 'mplcursors',
'github_banner': True,
'github_button': False,
'code_font_size': '80%',
}
html_css_files = ['hide_some_gallery_elements.css']
html_static_path = ['_static']
html_sidebars = {'**': ['about.html', 'navigation.html', 'localtoc.html']}
# html_last_updated_fmt = '' # bitprophet/alabaster#93
htmlhelp_basename = 'mplcursors_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
latex_documents = [(
master_doc,
'mplcursors.tex',
'mplcursors Documentation',
'Antony Lee',
'manual',
)]
# -- Options for manual page output ---------------------------------------
man_pages = [(
master_doc,
'mplcursors',
'mplcursors Documentation',
[author],
1,
)]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [(
master_doc,
'mplcursors',
'mplcursors Documentation',
author,
'mplcursors',
'Interactive data selection cursors for Matplotlib.',
'Miscellaneous',
)]
# -- Misc. configuration --------------------------------------------------
autodoc_member_order = 'bysource'
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'matplotlib': ('https://matplotlib.org/stable', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
}
# CustomSortKey cannot be defined *here* because it would be unpicklable as
# this file is exec'd rather than imported.
sys.path.append(".")
from _local_ext import CustomSortKey
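# For illustration only (an assumption, not mplcursors' actual code): a sort
# key usable for 'within_subsection_order' is typically a small class along
# the lines of
#
#     class CustomSortKey:
#         def __init__(self, src_dir):
#             self.src_dir = src_dir     # gallery source directory
#         def __call__(self, filename):
#             return filename            # value the examples are sorted by
#
# kept in an importable module (here _local_ext.py) so that it stays
# picklable, per the note above.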
os.environ.pop("DISPLAY", None) # Don't warn about non-GUI when running s-g.
sphinx_gallery_conf = {
'backreferences_dir': None,
'examples_dirs': '../../examples',
'filename_pattern': r'.*\.py',
'gallery_dirs': 'examples',
'min_reported_time': 1,
'within_subsection_order': CustomSortKey,
}
| mit |
hitszxp/scikit-learn | sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
mortonjt/scipy | scipy/stats/_binned_statistic.py | 5 | 16974 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for a set of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : array_like
A sequence of values to be binned.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First a basic example:
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]), array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled', alpha=0.2,
... label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, xy = binned_statistic_dd([x], values, statistic,
bins, range)
return medians, edges[0], xy
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None):
"""
Compute a bidimensional binned statistic for a set of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (M,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like
The values on which the statistic will be computed. This must be
the same shape as `x`.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx=ny=bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edges = y_edges = bins),
* the bin edges in each dimension (x_edges, y_edges = bins).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin
xedges : (nx + 1) ndarray
The bin edges along the first dimension.
yedges : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as `values`.
See Also
--------
numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
.. versionadded:: 0.11.0
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, xy = binned_statistic_dd([x, y], values, statistic,
bins, range)
return medians, edges[0], edges[1], xy
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : array_like
The values on which the statistic will be computed. This must be
the same shape as x.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension
binnumber : 1-D ndarray of ints
This assigns to each observation an integer that represents the bin
in which this observation falls. Array has the same length as values.
See Also
--------
np.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
.. versionadded:: 0.11.0
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# This code is based on np.histogramdd
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D * [None]
dedges = D * [None]
try:
M = len(bins)
if M != D:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = D * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(0), float))
smax = np.atleast_1d(np.array(sample.max(0), float))
else:
smin = np.zeros(D)
smax = np.zeros(D)
for i in np.arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in np.arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in np.arange(D):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in np.arange(D):
Ncount[i] = np.digitize(sample[:, i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
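    # For example (illustrative only): with edges [0., 1., 2.], np.digitize
    # puts a sample lying exactly at 2.0 into bin 3 (the upper outlier bin);
    # the shift below moves it back into bin 2, the last real bin.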
for i in np.arange(D):
# Rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal)
== np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
xy = np.zeros(N, int)
for i in np.arange(0, D - 1):
xy += Ncount[ni[i]] * nbin[ni[i + 1:]].prod()
xy += Ncount[ni[-1]]
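    # Worked example (illustrative only): with D = 2 and nbin = [4, 5]
    # (outlier bins included), a sample in x-bin 2 and y-bin 3 receives the
    # flat index 2 * 5 + 3 = 13.  The argsort above fixes the axis order used
    # for flattening; the swapaxes pass further down restores the original
    # axis order of the result.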
result = np.empty(nbin.prod(), float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
a = flatcount.nonzero()
result[a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(xy, None)
flatsum = np.bincount(xy, values)
flatsum2 = np.bincount(xy, values ** 2)
a = flatcount.nonzero()
result[a] = np.sqrt(flatsum2[a] / flatcount[a]
- (flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(xy, None)
a = np.arange(len(flatcount))
result[a] = flatcount
elif statistic == 'sum':
result.fill(0)
flatsum = np.bincount(xy, values)
a = np.arange(len(flatsum))
result[a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(xy):
result[i] = np.median(values[xy == i])
elif callable(statistic):
with warnings.catch_warnings():
            # NumPy generates a warning for mean/std/... with an empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(xy):
result[i] = statistic(values[xy == i])
# Shape into a proper matrix
result = result.reshape(np.sort(nbin))
for i in np.arange(nbin.size):
j = ni.argsort()[i]
result = result.swapaxes(i, j)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D * [slice(1, -1)]
result = result[core]
if (result.shape != nbin - 2).any():
raise RuntimeError('Internal Shape Error')
return result, edges, xy
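if __name__ == '__main__':
    # Hedged usage sketch -- added for illustration only, not part of the
    # upstream module.  It exercises the functions above on a tiny,
    # hand-checkable data set and needs nothing beyond the imports already
    # made at the top of this file.
    x = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5])
    vals = np.array([10.0, 20.0, 30.0, 40.0, 50.0, 60.0])
    # Mean of `vals` in 3 equal-width bins over [1.0, 3.5]; each bin averages
    # two consecutive values, so the means should be [15., 35., 55.].
    stat, edges, binnumber = binned_statistic(x, vals, statistic='mean',
                                              bins=3)
    print('1-D bin means  :', stat)
    print('1-D bin edges  :', edges)
    # bin numbers are 1-based; 0 is reserved for the lower outlier bin
    print('1-D bin numbers:', binnumber)
    # Count points per cell on a 3x2 grid; every cell should hold exactly
    # one point with this data set.
    y = np.array([0.0, 1.0, 0.0, 1.0, 0.0, 1.0])
    counts, xedges, yedges, bn = binned_statistic_2d(x, y, vals,
                                                     statistic='count',
                                                     bins=[3, 2])
    print('2-D counts:')
    print(counts)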
| bsd-3-clause |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/tests/frame/test_analytics.py | 1 | 80870 | # -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import timedelta, datetime
from distutils.version import LooseVersion
import sys
import nose
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas.compat import lrange
from pandas import (compat, isnull, notnull, DataFrame, Series,
MultiIndex, date_range, Timestamp, _np_version_under1p11)
import pandas as pd
import pandas.core.common as com
import pandas.core.nanops as nanops
from pandas.util.testing import (assert_almost_equal,
assert_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas import _np_version_under1p9
from pandas.tests.frame.common import TestData
class TestDataFrameAnalytics(tm.TestCase, TestData):
_multiprocess_can_split_ = True
    # ----------------------------------------------------------------------
# Correlation and covariance
def test_corr_pearson(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('pearson')
def test_corr_kendall(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('kendall')
def test_corr_spearman(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('spearman')
def _check_method(self, method='pearson', check_minp=False):
if not check_minp:
correls = self.frame.corr(method=method)
exp = self.frame['A'].corr(self.frame['C'], method=method)
assert_almost_equal(correls['A']['C'], exp)
else:
result = self.frame.corr(min_periods=len(self.frame) - 8)
expected = self.frame.corr()
expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan
assert_frame_equal(result, expected)
def test_corr_non_numeric(self):
tm._skip_if_no_scipy()
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
# exclude non-numeric types
result = self.mixed_frame.corr()
expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].corr()
assert_frame_equal(result, expected)
def test_corr_nooverlap(self):
tm._skip_if_no_scipy()
# nothing in common
for meth in ['pearson', 'kendall', 'spearman']:
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
self.assertTrue(isnull(rs.ix['A', 'B']))
self.assertTrue(isnull(rs.ix['B', 'A']))
self.assertEqual(rs.ix['A', 'A'], 1)
self.assertEqual(rs.ix['B', 'B'], 1)
self.assertTrue(isnull(rs.ix['C', 'C']))
def test_corr_constant(self):
tm._skip_if_no_scipy()
# constant --> all NA
for meth in ['pearson', 'spearman']:
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
self.assertTrue(isnull(rs.values).all())
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
# it works!
df3.cov()
df3.corr()
def test_corr_int_and_boolean(self):
tm._skip_if_no_scipy()
        # when the dtypes of the pandas Series are different,
        # the resulting ndarray will have dtype=object,
        # so it needs to be handled properly
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
assert_frame_equal(df.corr(meth), expected)
def test_cov(self):
# min_periods no NAs (corner case)
expected = self.frame.cov()
result = self.frame.cov(min_periods=len(self.frame))
assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
self.assertTrue(isnull(result.values).all())
# with NAs
frame = self.frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = self.frame.cov(min_periods=len(self.frame) - 8)
expected = self.frame.cov()
expected.ix['A', 'B'] = np.nan
expected.ix['B', 'A'] = np.nan
# regular
self.frame['A'][:5] = nan
self.frame['B'][:10] = nan
cov = self.frame.cov()
assert_almost_equal(cov['A']['C'],
self.frame['A'].cov(self.frame['C']))
# exclude non-numeric types
result = self.mixed_frame.cov()
expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].cov()
assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
assert_frame_equal(result, expected)
df.ix[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
assert_frame_equal(result, expected)
def test_corrwith(self):
a = self.tsframe
noise = Series(randn(len(a)), index=a.index)
b = self.tsframe.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
self.assertNotIn('B', dropped)
dropped = a.corrwith(b, axis=1, drop=True)
self.assertNotIn(a.index[-1], dropped.index)
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(randn(5, 4), index=index, columns=columns)
df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
assert_almost_equal(correls[row], df1.ix[row].corr(df2.ix[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.ix[:, cols].corrwith(df2.ix[:, cols])
assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.ix[:, cols].corrwith(df2.ix[:, cols], axis=1)
assert_series_equal(result, expected)
def test_corrwith_series(self):
result = self.tsframe.corrwith(self.tsframe['A'])
expected = self.tsframe.apply(self.tsframe['A'].corr)
assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
assert_almost_equal(c1, c2)
self.assertTrue(c1 < 1)
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
assert_almost_equal(test.values, [2, 150, 'abcde'])
assert_series_equal(test, df.T.sum(axis=1))
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f,
has_skipna=False,
has_numeric_only=True,
check_dtype=False,
check_dates=True)
# corner case
frame = DataFrame()
ct1 = frame.count(1)
tm.assertIsInstance(ct1, Series)
ct2 = frame.count(0)
tm.assertIsInstance(ct2, Series)
# GH #423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
assert_series_equal(result, expected)
def test_sum(self):
self._check_stat_op('sum', np.sum, has_numeric_only=True)
# mixed types (with upcasting happening)
self._check_stat_op('sum', np.sum,
frame=self.mixed_float.astype('float32'),
has_numeric_only=True, check_dtype=False,
check_less_precise=True)
def test_stat_operators_attempt_obj_array(self):
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
dtype='O')
methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']
# GH #676
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
for meth in methods:
self.assertEqual(df.values.dtype, np.object_)
result = getattr(df, meth)(1)
expected = getattr(df.astype('f8'), meth)(1)
if not tm._incompat_bottleneck_version(meth):
assert_series_equal(result, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean, check_dates=True)
def test_product(self):
self._check_stat_op('product', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, check_dates=True)
def test_min(self):
self._check_stat_op('min', np.min, check_dates=True)
self._check_stat_op('min', np.min, frame=self.intframe)
def test_cummin(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cummin = self.tsframe.cummin()
expected = self.tsframe.apply(Series.cummin)
assert_frame_equal(cummin, expected)
# axis = 1
cummin = self.tsframe.cummin(axis=1)
expected = self.tsframe.apply(Series.cummin, axis=1)
assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = self.tsframe.cummin(axis=1)
self.assertEqual(np.shape(cummin_xs), np.shape(self.tsframe))
def test_cummax(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cummax = self.tsframe.cummax()
expected = self.tsframe.apply(Series.cummax)
assert_frame_equal(cummax, expected)
# axis = 1
cummax = self.tsframe.cummax(axis=1)
expected = self.tsframe.apply(Series.cummax, axis=1)
assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = self.tsframe.cummax(axis=1)
self.assertEqual(np.shape(cummax_xs), np.shape(self.tsframe))
def test_max(self):
self._check_stat_op('max', np.max, check_dates=True)
self._check_stat_op('max', np.max, frame=self.intframe)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
self._check_stat_op('mad', f)
def test_var_std(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
result = self.tsframe.std(ddof=4)
expected = self.tsframe.apply(lambda x: x.std(ddof=4))
assert_almost_equal(result, expected)
result = self.tsframe.var(ddof=4)
expected = self.tsframe.apply(lambda x: x.var(ddof=4))
assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
self.assertFalse((result < 0).any())
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = nanops.nanvar(arr, axis=0)
self.assertFalse((result < 0).any())
nanops._USE_BOTTLENECK = True
def test_numeric_only_flag(self):
# GH #9201
methods = ['sem', 'var', 'std']
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.ix[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.ix[0, 'foo'] = 'a'
for meth in methods:
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
self.assertRaises(TypeError, lambda: getattr(df1, meth)
(axis=1, numeric_only=False))
self.assertRaises(TypeError, lambda: getattr(df2, meth)
(axis=1, numeric_only=False))
def test_quantile(self):
from numpy import percentile
q = self.tsframe.quantile(0.1, axis=0)
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
q = self.tsframe.quantile(0.9, axis=1)
q = self.intframe.quantile(0.1)
self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
# test degenerate case
q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)
assert(np.isnan(q['x']) and np.isnan(q['y']))
# non-numeric exclusion
df = DataFrame({'col1': ['A', 'A', 'B', 'B'], 'col2': [1, 2, 3, 4]})
rs = df.quantile(0.5)
xp = df.median()
assert_series_equal(rs, xp)
# axis
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])
assert_series_equal(result, expected)
result = df.quantile([.5, .75], axis=1)
expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],
3: [3.5, 3.75]}, index=[0.5, 0.75])
assert_frame_equal(result, expected, check_index_type=True)
# We may want to break API in the future to change this
# so that we exclude non-numeric along the same axis
# See GH #7312
df = DataFrame([[1, 2, 3],
['a', 'b', 4]])
result = df.quantile(.5, axis=1)
expected = Series([3., 4.], index=[0, 1])
assert_series_equal(result, expected)
def test_quantile_axis_parameter(self):
# GH 9543/9544
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=0)
expected = Series([2., 3.], index=["A", "B"])
assert_series_equal(result, expected)
expected = df.quantile(.5, axis="index")
assert_series_equal(result, expected)
result = df.quantile(.5, axis=1)
expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])
assert_series_equal(result, expected)
result = df.quantile(.5, axis="columns")
assert_series_equal(result, expected)
self.assertRaises(ValueError, df.quantile, 0.1, axis=-1)
self.assertRaises(ValueError, df.quantile, 0.1, axis="column")
def test_quantile_interpolation(self):
# GH #10174
if _np_version_under1p9:
raise nose.SkipTest("Numpy version under 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
q = self.intframe.quantile(0.1)
self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
# test with and without interpolation keyword
q1 = self.intframe.quantile(0.1)
self.assertEqual(q1['A'], np.percentile(self.intframe['A'], 10))
assert_series_equal(q, q1)
# interpolation method other than default linear
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
result = df.quantile(.5, axis=1, interpolation='nearest')
expected = Series([1., 2., 3.], index=[1, 2, 3])
assert_series_equal(result, expected)
# axis
result = df.quantile([.5, .75], axis=1, interpolation='lower')
expected = DataFrame({1: [1., 1.], 2: [2., 2.],
3: [3., 3.]}, index=[0.5, 0.75])
assert_frame_equal(result, expected)
# test degenerate case
df = DataFrame({'x': [], 'y': []})
q = df.quantile(0.1, axis=0, interpolation='higher')
assert(np.isnan(q['x']) and np.isnan(q['y']))
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5], interpolation='midpoint')
# https://github.com/numpy/numpy/issues/7163
if _np_version_under1p11:
expected = DataFrame([[1.5, 1.5, 1.5], [2.5, 2.5, 2.5]],
index=[.25, .5], columns=['a', 'b', 'c'])
else:
expected = DataFrame([[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
def test_quantile_interpolation_np_lt_1p9(self):
# GH #10174
if not _np_version_under1p9:
raise nose.SkipTest("Numpy version is greater than 1.9")
from numpy import percentile
# interpolation = linear (default case)
q = self.tsframe.quantile(0.1, axis=0, interpolation='linear')
self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))
q = self.intframe.quantile(0.1)
self.assertEqual(q['A'], percentile(self.intframe['A'], 10))
# test with and without interpolation keyword
q1 = self.intframe.quantile(0.1)
self.assertEqual(q1['A'], np.percentile(self.intframe['A'], 10))
assert_series_equal(q, q1)
# interpolation method other than default linear
expErrMsg = "Interpolation methods other than linear"
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
with assertRaisesRegexp(ValueError, expErrMsg):
df.quantile(.5, axis=1, interpolation='nearest')
with assertRaisesRegexp(ValueError, expErrMsg):
df.quantile([.5, .75], axis=1, interpolation='lower')
# test degenerate case
df = DataFrame({'x': [], 'y': []})
with assertRaisesRegexp(ValueError, expErrMsg):
q = df.quantile(0.1, axis=0, interpolation='higher')
# multi
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
with assertRaisesRegexp(ValueError, expErrMsg):
df.quantile([.25, .5], interpolation='midpoint')
def test_quantile_multi(self):
df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],
columns=['a', 'b', 'c'])
result = df.quantile([.25, .5])
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=['a', 'b', 'c'])
assert_frame_equal(result, expected)
# axis = 1
result = df.quantile([.25, .5], axis=1)
expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],
index=[.25, .5], columns=[0, 1, 2])
# empty
result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)
expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},
index=[.1, .9])
assert_frame_equal(result, expected)
def test_quantile_datetime(self):
df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})
# exclude datetime
result = df.quantile(.5)
expected = Series([2.5], index=['b'])
# datetime
result = df.quantile(.5, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],
index=['a', 'b'])
assert_series_equal(result, expected)
# datetime w/ multi
result = df.quantile([.5], numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],
index=[.5], columns=['a', 'b'])
assert_frame_equal(result, expected)
# axis = 1
df['c'] = pd.to_datetime(['2011', '2012'])
result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)
expected = Series([Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')],
index=[0, 1])
assert_series_equal(result, expected)
result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)
expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),
Timestamp('2011-07-02 12:00:00')]],
index=[0.5], columns=[0, 1])
assert_frame_equal(result, expected)
def test_quantile_invalid(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assertRaisesRegexp(ValueError, msg):
self.tsframe.quantile(invalid)
def test_cumsum(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cumsum = self.tsframe.cumsum()
expected = self.tsframe.apply(Series.cumsum)
assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = self.tsframe.cumsum(axis=1)
expected = self.tsframe.apply(Series.cumsum, axis=1)
assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = self.tsframe.cumsum(axis=1)
self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))
def test_cumprod(self):
self.tsframe.ix[5:10, 0] = nan
self.tsframe.ix[10:15, 1] = nan
self.tsframe.ix[15:, 2] = nan
# axis = 0
cumprod = self.tsframe.cumprod()
expected = self.tsframe.apply(Series.cumprod)
assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = self.tsframe.cumprod(axis=1)
expected = self.tsframe.apply(Series.cumprod, axis=1)
assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = self.tsframe.cumprod(axis=1)
self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))
# ints
df = self.tsframe.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = self.tsframe.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_rank(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
ranks0 = self.frame.rank()
ranks1 = self.frame.rank(1)
mask = np.isnan(self.frame.values)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp0[mask] = np.nan
exp1 = np.apply_along_axis(rankdata, 1, fvals)
exp1[mask] = np.nan
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# integers
df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))
result = df.rank()
exp = df.astype(float).rank()
assert_frame_equal(result, exp)
result = df.rank(1)
exp = df.astype(float).rank(1)
assert_frame_equal(result, exp)
def test_rank2(self):
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0
result = df.rank(1, pct=True)
assert_frame_equal(result, expected)
df = DataFrame([[1, 3, 2], [1, 2, 3]])
expected = df.rank(0) / 2.0
result = df.rank(0, pct=True)
assert_frame_equal(result, expected)
df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])
result = df.rank(1, numeric_only=False)
assert_frame_equal(result, expected)
expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])
result = df.rank(0, numeric_only=False)
assert_frame_equal(result, expected)
df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])
expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])
result = df.rank(1, numeric_only=False)
assert_frame_equal(result, expected)
expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])
result = df.rank(0, numeric_only=False)
assert_frame_equal(result, expected)
        # this does not work without an extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check the rank
expected = DataFrame([[2., nan, 1.],
[2., 3., 1.]])
result = df.rank(1, numeric_only=False, ascending=True)
assert_frame_equal(result, expected)
expected = DataFrame([[1., nan, 2.],
[2., 1., 3.]])
result = df.rank(1, numeric_only=False, ascending=False)
assert_frame_equal(result, expected)
# mixed-type frames
self.mixed_frame['datetime'] = datetime.now()
self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
result = self.mixed_frame.rank(1)
expected = self.mixed_frame.rank(1, numeric_only=True)
assert_frame_equal(result, expected)
df = DataFrame({"a": [1e-20, -5, 1e-20 + 1e-40, 10,
1e60, 1e80, 1e-30]})
exp = DataFrame({"a": [3.5, 1., 3.5, 5., 6., 7., 2.]})
assert_frame_equal(df.rank(), exp)
def test_rank_na_option(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.frame['A'][::2] = np.nan
self.frame['B'][::3] = np.nan
self.frame['C'][::4] = np.nan
self.frame['D'][::5] = np.nan
# bottom
ranks0 = self.frame.rank(na_option='bottom')
ranks1 = self.frame.rank(1, na_option='bottom')
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fvals)
exp1 = np.apply_along_axis(rankdata, 1, fvals)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# top
ranks0 = self.frame.rank(na_option='top')
ranks1 = self.frame.rank(1, na_option='top')
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, fval0)
exp1 = np.apply_along_axis(rankdata, 1, fval1)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# descending
# bottom
ranks0 = self.frame.rank(na_option='top', ascending=False)
ranks1 = self.frame.rank(1, na_option='top', ascending=False)
fvals = self.frame.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fvals)
exp1 = np.apply_along_axis(rankdata, 1, -fvals)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
# descending
# top
ranks0 = self.frame.rank(na_option='bottom', ascending=False)
ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)
fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values
fval1 = self.frame.T
fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T
fval1 = fval1.fillna(np.inf).values
exp0 = np.apply_along_axis(rankdata, 0, -fval0)
exp1 = np.apply_along_axis(rankdata, 1, -fval1)
assert_almost_equal(ranks0.values, exp0)
assert_almost_equal(ranks1.values, exp1)
def test_rank_axis(self):
# check if using axes' names gives the same result
df = pd.DataFrame([[2, 1], [4, 3]])
assert_frame_equal(df.rank(axis=0), df.rank(axis='index'))
assert_frame_equal(df.rank(axis=1), df.rank(axis='columns'))
def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.tsframe.sem(ddof=4)
expected = self.tsframe.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
self.assertFalse((result < 0).any())
if nanops._USE_BOTTLENECK:
nanops._USE_BOTTLENECK = False
result = nanops.nansem(arr, axis=0)
self.assertFalse((result < 0).any())
nanops._USE_BOTTLENECK = True
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
def alt(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_kurt(self):
tm._skip_if_no_scipy()
from scipy.stats import kurtosis
def alt(x):
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
assert_series_equal(kurt, kurt2, check_names=False)
self.assertTrue(kurt.name is None)
self.assertEqual(kurt2.name, 'bar')
def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
has_numeric_only=False, check_dtype=True,
check_dates=False, check_less_precise=False):
if frame is None:
frame = self.frame
# set some NAs
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
f = getattr(frame, name)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
_f = getattr(df, name)
result = _f()
self.assertIsInstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, name)()
self.assertIsInstance(result, Series)
self.assertTrue(len(result))
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if not tm._incompat_bottleneck_version(name):
assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
self.assertEqual(lcd_dtype, result0.dtype)
self.assertEqual(lcd_dtype, result1.dtype)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
assertRaisesRegexp(ValueError, 'No axis named 2', f, axis=2)
# make sure works on mixed-type frame
getattr(self.mixed_frame, name)(axis=0)
getattr(self.mixed_frame, name)(axis=1)
if has_numeric_only:
getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
getattr(self.frame, name)(axis=0, numeric_only=False)
getattr(self.frame, name)(axis=1, numeric_only=False)
# all NA case
if has_skipna:
all_na = self.frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if not tm._incompat_bottleneck_version(name):
self.assertTrue(np.isnan(r0).all())
self.assertTrue(np.isnan(r1).all())
def test_mode(self):
df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11],
"B": [10, 10, 10, np.nan, 3, 4],
"C": [8, 8, 8, 9, 9, 9],
"D": np.arange(6, dtype='int64'),
"E": [8, 8, 1, 1, 3, 3]})
assert_frame_equal(df[["A"]].mode(),
pd.DataFrame({"A": [12]}))
expected = pd.Series([], dtype='int64', name='D').to_frame()
assert_frame_equal(df[["D"]].mode(), expected)
expected = pd.Series([1, 3, 8], dtype='int64', name='E').to_frame()
assert_frame_equal(df[["E"]].mode(), expected)
assert_frame_equal(df[["A", "B"]].mode(),
pd.DataFrame({"A": [12], "B": [10.]}))
assert_frame_equal(df.mode(),
pd.DataFrame({"A": [12, np.nan, np.nan],
"B": [10, np.nan, np.nan],
"C": [8, 9, np.nan],
"D": [np.nan, np.nan, np.nan],
"E": [1, 3, 8]}))
# outputs in sorted order
df["C"] = list(reversed(df["C"]))
com.pprint_thing(df["C"])
com.pprint_thing(df["C"].mode())
a, b = (df[["A", "B", "C"]].mode(),
pd.DataFrame({"A": [12, np.nan],
"B": [10, np.nan],
"C": [8, 9]}))
com.pprint_thing(a)
com.pprint_thing(b)
assert_frame_equal(a, b)
# should work with heterogeneous types
df = pd.DataFrame({"A": np.arange(6, dtype='int64'),
"B": pd.date_range('2011', periods=6),
"C": list('abcdef')})
exp = pd.DataFrame({"A": pd.Series([], dtype=df["A"].dtype),
"B": pd.Series([], dtype=df["B"].dtype),
"C": pd.Series([], dtype=df["C"].dtype)})
assert_frame_equal(df.mode(), exp)
# and also when not empty
df.loc[1, "A"] = 0
df.loc[4, "B"] = df.loc[3, "B"]
df.loc[5, "C"] = 'e'
exp = pd.DataFrame({"A": pd.Series([0], dtype=df["A"].dtype),
"B": pd.Series([df.loc[3, "B"]],
dtype=df["B"].dtype),
"C": pd.Series(['e'], dtype=df["C"].dtype)})
assert_frame_equal(df.mode(), exp)
def test_operators_timedelta64(self):
from datetime import timedelta
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
self.assertEqual(result[0], diffs.ix[0, 'A'])
self.assertEqual(result[1], diffs.ix[0, 'B'])
result = diffs.min(axis=1)
self.assertTrue((result == diffs.ix[0, 'B']).all())
# max
result = diffs.max()
self.assertEqual(result[0], diffs.ix[2, 'A'])
self.assertEqual(result[1], diffs.ix[2, 'B'])
result = diffs.max(axis=1)
self.assertTrue((result == diffs['A']).all())
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
from pandas.tseries.timedeltas import (
_coerce_scalar_to_timedelta_type as _coerce)
result = mixed.min()
expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),
_coerce(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
self.assertEqual(df['off1'].dtype, 'timedelta64[ns]')
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')
self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')
def test_sum_corner(self):
axis0 = self.empty.sum(0)
axis1 = self.empty.sum(1)
tm.assertIsInstance(axis0, Series)
tm.assertIsInstance(axis1, Series)
self.assertEqual(len(axis0), 0)
self.assertEqual(len(axis1), 0)
def test_sum_object(self):
values = self.frame.values.astype(int)
frame = DataFrame(values, index=self.frame.index,
columns=self.frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self):
# ensure this works, bug report
bools = np.isnan(self.frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self):
# unit test when have object data
the_mean = self.mixed_frame.mean(axis=0)
the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
self.assertTrue(the_sum.index.equals(the_mean.index))
self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))
# xs sum mixed type, just want to know it works...
the_mean = self.mixed_frame.mean(axis=1)
the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
self.assertTrue(the_sum.index.equals(the_mean.index))
# take mean of boolean column
self.frame['bool'] = self.frame['A'] > 0
means = self.frame.mean(0)
self.assertEqual(means['bool'], self.frame['bool'].values.mean())
def test_stats_mixed_type(self):
# don't blow up
self.mixed_frame.std(1)
self.mixed_frame.var(1)
self.mixed_frame.mean(1)
self.mixed_frame.skew(1)
def test_median_corner(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, frame=self.intframe,
check_dtype=False, check_dates=True)
# Miscellanea
def test_count_objects(self):
dm = DataFrame(self.mixed_frame._series)
df = DataFrame(self.mixed_frame._series)
assert_series_equal(dm.count(), df.count())
assert_series_equal(dm.count(1), df.count(1))
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isnull(df)
self.assertEqual(bools.sum(axis=1)[0], 10)
# Index of max / min
def test_idxmin(self):
frame = self.frame
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(
Series.idxmin, axis=axis, skipna=skipna)
assert_series_equal(result, expected)
self.assertRaises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self):
frame = self.frame
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(
Series.idxmax, axis=axis, skipna=skipna)
assert_series_equal(result, expected)
self.assertRaises(ValueError, frame.idxmax, axis=2)
# ----------------------------------------------------------------------
# Logical reductions
def test_any_all(self):
self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)
df = DataFrame(randn(10, 4)) > 0
df.any(1)
df.all(1)
df.any(1, bool_only=True)
df.all(1, bool_only=True)
# skip pathological failure cases
# class CantNonzero(object):
# def __nonzero__(self):
# raise ValueError
# df[4] = CantNonzero()
# it works!
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
# df[4][4] = np.nan
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
has_bool_only=False):
if frame is None:
frame = self.frame > 0
# set some NAs
frame = DataFrame(frame.values.astype(object), frame.index,
frame.columns)
frame.ix[5:10] = np.nan
frame.ix[15:20, -2:] = np.nan
f = getattr(frame, name)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
assert_series_equal(result0, frame.apply(wrapper))
assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
assert_series_equal(result0, frame.apply(skipna_wrapper))
assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
self.assertRaises(ValueError, f, axis=2)
# make sure works on mixed-type frame
mixed = self.mixed_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0
getattr(mixed, name)(axis=0)
getattr(mixed, name)(axis=1)
class NonzeroFail:
def __nonzero__(self):
raise ValueError
mixed['_nonzero_fail_'] = NonzeroFail()
if has_bool_only:
getattr(mixed, name)(axis=0, bool_only=True)
getattr(mixed, name)(axis=1, bool_only=True)
getattr(frame, name)(axis=0, bool_only=False)
getattr(frame, name)(axis=1, bool_only=False)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if name == 'any':
self.assertFalse(r0.any())
self.assertFalse(r1.any())
else:
self.assertTrue(r0.all())
self.assertTrue(r1.all())
# ----------------------------------------------------------------------
# Top / bottom
def test_nlargest(self):
# GH10393
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
result = df.nlargest(5, 'a')
expected = df.sort_values('a', ascending=False).head(5)
assert_frame_equal(result, expected)
def test_nlargest_multiple_columns(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
result = df.nlargest(5, ['a', 'b'])
expected = df.sort_values(['a', 'b'], ascending=False).head(5)
assert_frame_equal(result, expected)
def test_nsmallest(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10])})
result = df.nsmallest(5, 'a')
expected = df.sort_values('a').head(5)
assert_frame_equal(result, expected)
def test_nsmallest_multiple_columns(self):
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
result = df.nsmallest(5, ['a', 'c'])
expected = df.sort_values(['a', 'c']).head(5)
assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH #4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
assert_frame_equal(result, expected)
def test_isin_empty(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
result = df.isin([])
expected = pd.DataFrame(False, df.index, df.columns)
assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with tm.assertRaises(TypeError):
df.isin('a')
with tm.assertRaises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
assert_frame_equal(result, expected)
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with tm.assertRaises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with tm.assertRaises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with tm.assertRaises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Row deduplication
def test_drop_duplicates(self):
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.ix[[]]
assert_frame_equal(result, expected)
self.assertEqual(len(result), 0)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates('AAA', take_last=True)
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
# multi column
expected = df.ix[[0, 1, 2, 3]]
result = df.drop_duplicates(np.array(['AAA', 'B']))
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'])
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep='last')
expected = df.ix[[0, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep=False)
expected = df.ix[[0]]
assert_frame_equal(result, expected)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates(('AAA', 'B'), take_last=True)
expected = df.ix[[0, 5, 6, 7]]
assert_frame_equal(result, expected)
# consider everything
df2 = df.ix[:, ['AAA', 'B', 'C']]
result = df2.drop_duplicates()
# in this case only
expected = df2.drop_duplicates(['AAA', 'B'])
assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep='last')
expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep=False)
expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
assert_frame_equal(result, expected)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df2.drop_duplicates(take_last=True)
with tm.assert_produces_warning(FutureWarning):
expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)
assert_frame_equal(result, expected)
# integers
result = df.drop_duplicates('C')
expected = df.iloc[[0, 2]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[-2, -1]]
assert_frame_equal(result, expected)
df['E'] = df['C'].astype('int8')
result = df.drop_duplicates('E')
expected = df.iloc[[0, 2]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('E', keep='last')
expected = df.iloc[[-2, -1]]
assert_frame_equal(result, expected)
# GH 11376
df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
'y': [0, 6, 5, 5, 9, 1, 2]})
expected = df.loc[df.index != 3]
assert_frame_equal(df.drop_duplicates(), expected)
df = pd.DataFrame([[1, 0], [0, 2]])
assert_frame_equal(df.drop_duplicates(), df)
df = pd.DataFrame([[-2, 0], [0, -4]])
assert_frame_equal(df.drop_duplicates(), df)
x = np.iinfo(np.int64).max / 3 * 2
df = pd.DataFrame([[-x, x], [0, x + 4]])
assert_frame_equal(df.drop_duplicates(), df)
df = pd.DataFrame([[-x, x], [x, x + 4]])
assert_frame_equal(df.drop_duplicates(), df)
# GH 11864
df = pd.DataFrame([i] * 9 for i in range(16))
df = df.append([[1] + [0] * 8], ignore_index=True)
for keep in ['first', 'last', False]:
assert_equal(df.duplicated(keep=keep).sum(), 0)
def test_drop_duplicates_for_take_all(self):
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df.iloc[[0, 1, 2, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.iloc[[2, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.iloc[[2, 6]]
assert_frame_equal(result, expected)
# multiple columns
result = df.drop_duplicates(['AAA', 'B'])
expected = df.iloc[[0, 1, 2, 3, 4, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep='last')
expected = df.iloc[[0, 1, 2, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep=False)
expected = df.iloc[[0, 1, 2, 6]]
assert_frame_equal(result, expected)
def test_drop_duplicates_tuple(self):
df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates(('AA', 'AB'))
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep='last')
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep=False)
expected = df.ix[[]] # empty df
self.assertEqual(len(result), 0)
assert_frame_equal(result, expected)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates(('AA', 'AB'), take_last=True)
expected = df.ix[[6, 7]]
assert_frame_equal(result, expected)
# multi column
expected = df.ix[[0, 1, 2, 3]]
result = df.drop_duplicates((('AA', 'AB'), 'B'))
assert_frame_equal(result, expected)
def test_drop_duplicates_NA(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('A')
expected = df.ix[[0, 2, 3]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.ix[[1, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.ix[[]] # empty df
assert_frame_equal(result, expected)
self.assertEqual(len(result), 0)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates('A', take_last=True)
expected = df.ix[[1, 6, 7]]
assert_frame_equal(result, expected)
# multi column
result = df.drop_duplicates(['A', 'B'])
expected = df.ix[[0, 2, 3, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep='last')
expected = df.ix[[1, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep=False)
expected = df.ix[[6]]
assert_frame_equal(result, expected)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates(['A', 'B'], take_last=True)
expected = df.ix[[1, 5, 6, 7]]
assert_frame_equal(result, expected)
# nan
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('C')
expected = df[:2]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.ix[[3, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.ix[[]] # empty df
assert_frame_equal(result, expected)
self.assertEqual(len(result), 0)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates('C', take_last=True)
expected = df.ix[[3, 7]]
assert_frame_equal(result, expected)
# multi column
result = df.drop_duplicates(['C', 'B'])
expected = df.ix[[0, 1, 2, 4]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep='last')
expected = df.ix[[1, 3, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep=False)
expected = df.ix[[1]]
assert_frame_equal(result, expected)
# deprecate take_last
with tm.assert_produces_warning(FutureWarning):
result = df.drop_duplicates(['C', 'B'], take_last=True)
expected = df.ix[[1, 3, 6, 7]]
assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'baz', 'bar', 'qux'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})
# single column
result = df.drop_duplicates('A')
expected = df.iloc[[0, 2, 3, 5, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.iloc[[1, 4, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.iloc[[5, 7]]
assert_frame_equal(result, expected)
# nan
# single column
result = df.drop_duplicates('C')
expected = df.iloc[[0, 1, 5, 6]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[3, 5, 6, 7]]
assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.iloc[[5, 6]]
assert_frame_equal(result, expected)
def test_drop_duplicates_inplace(self):
orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
df = orig.copy()
df.drop_duplicates('A', inplace=True)
expected = orig[:2]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep='last', inplace=True)
expected = orig.ix[[6, 7]]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep=False, inplace=True)
expected = orig.ix[[]]
result = df
assert_frame_equal(result, expected)
self.assertEqual(len(df), 0)
# deprecate take_last
df = orig.copy()
with tm.assert_produces_warning(FutureWarning):
df.drop_duplicates('A', take_last=True, inplace=True)
expected = orig.ix[[6, 7]]
result = df
assert_frame_equal(result, expected)
# multi column
df = orig.copy()
df.drop_duplicates(['A', 'B'], inplace=True)
expected = orig.ix[[0, 1, 2, 3]]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
expected = orig.ix[[0, 5, 6, 7]]
result = df
assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
expected = orig.ix[[0]]
result = df
assert_frame_equal(result, expected)
# deprecate take_last
df = orig.copy()
with tm.assert_produces_warning(FutureWarning):
df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)
expected = orig.ix[[0, 5, 6, 7]]
result = df
assert_frame_equal(result, expected)
# consider everything
orig2 = orig.ix[:, ['A', 'B', 'C']].copy()
df2 = orig2.copy()
df2.drop_duplicates(inplace=True)
# in this case only
expected = orig2.drop_duplicates(['A', 'B'])
result = df2
assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep='last', inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep='last')
result = df2
assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep=False, inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep=False)
result = df2
assert_frame_equal(result, expected)
# deprecate take_last
df2 = orig2.copy()
with tm.assert_produces_warning(FutureWarning):
df2.drop_duplicates(take_last=True, inplace=True)
with tm.assert_produces_warning(FutureWarning):
expected = orig2.drop_duplicates(['A', 'B'], take_last=True)
result = df2
assert_frame_equal(result, expected)
# Rounding
def test_round(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with self.assertRaises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
assert_frame_equal(
df.round(partial_round_dict), expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
assert_frame_equal(
df.round(wrong_round_dict), expected_partially_rounded)
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with self.assertRaises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with self.assertRaises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with self.assertRaises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with self.assertRaises(TypeError):
df.round(non_int_round_Series)
# Negative numbers
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1': [110., 210, 310], 'col2': [100., 200, 300]})
assert_frame_equal(
big_df.round(negative_round_dict), expected_neg_rounded)
# nan in Series round
nan_round_Series = Series({'col1': nan, 'col2': 1})
# TODO(wesm): unused?
expected_nan_round = DataFrame({ # noqa
'col1': [1.123, 2.123, 3.123],
'col2': [1.2, 2.2, 3.2]})
if sys.version < LooseVersion('2.7'):
# Rounding with decimal is a ValueError in Python < 2.7
with self.assertRaises(ValueError):
df.round(nan_round_Series)
else:
with self.assertRaises(TypeError):
df.round(nan_round_Series)
# Make sure this doesn't break existing Series.round
assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
# named columns
# GH 11986
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
df.columns.name = "cols"
expected_rounded.columns.name = "cols"
assert_frame_equal(df.round(decimals), expected_rounded)
# interaction of named columns & series
assert_series_equal(df['col1'].round(decimals),
expected_rounded['col1'])
assert_series_equal(df.round(decimals)['col1'],
expected_rounded['col1'])
def test_round_mixed_type(self):
# GH11885
df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
round_0 = DataFrame({'col1': [1., 2., 3., 4.],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
assert_frame_equal(df.round(), round_0)
assert_frame_equal(df.round(1), df)
assert_frame_equal(df.round({'col1': 1}), df)
assert_frame_equal(df.round({'col1': 0}), round_0)
assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
assert_frame_equal(df.round({'col3': 1}), df)
def test_round_issue(self):
# GH11611
df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
index=['first', 'second', 'third'])
dfs = pd.concat((df, df), axis=1)
rounded = dfs.round()
self.assertTrue(rounded.index.equals(dfs.index))
decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
self.assertRaises(ValueError, df.round, decimals)
def test_built_in_round(self):
if not compat.PY3:
raise nose.SkipTest("build in round cannot be overriden "
"prior to Python 3")
# GH11763
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
assert_frame_equal(round(df), expected_rounded)
# Clip
def test_clip(self):
median = self.frame.median().median()
capped = self.frame.clip_upper(median)
self.assertFalse((capped.values > median).any())
floored = self.frame.clip_lower(median)
self.assertFalse((floored.values < median).any())
double = self.frame.clip(upper=median, lower=median)
self.assertFalse((double.values != median).any())
def test_dataframe_clip(self):
# GH #2747
df = DataFrame(np.random.randn(1000, 2))
for lb, ub in [(-1, 1), (1, -1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb, ub), max(ub, lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
self.assertTrue((clipped_df.values[lb_mask] == lb).all())
self.assertTrue((clipped_df.values[ub_mask] == ub).all())
self.assertTrue((clipped_df.values[mask] ==
df.values[mask]).all())
def test_clip_against_series(self):
# GH #6966
df = DataFrame(np.random.randn(1000, 2))
lb = Series(np.random.randn(1000))
ub = lb + 1
clipped_df = df.clip(lb, ub, axis=0)
for i in range(2):
lb_mask = df.iloc[:, i] <= lb
ub_mask = df.iloc[:, i] >= ub
mask = ~lb_mask & ~ub_mask
result = clipped_df.loc[lb_mask, i]
assert_series_equal(result, lb[lb_mask], check_names=False)
self.assertEqual(result.name, i)
result = clipped_df.loc[ub_mask, i]
assert_series_equal(result, ub[ub_mask], check_names=False)
self.assertEqual(result.name, i)
assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
def test_clip_against_frame(self):
df = DataFrame(np.random.randn(1000, 2))
lb = DataFrame(np.random.randn(1000, 2))
ub = lb + 1
clipped_df = df.clip(lb, ub)
lb_mask = df <= lb
ub_mask = df >= ub
mask = ~lb_mask & ~ub_mask
assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
assert_frame_equal(clipped_df[mask], df[mask])
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
        result = a.dot(b1)
assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
assert_series_equal(result, expected['one'], check_names=False)
self.assertTrue(result.name is None)
result = a.dot(b1['one'])
assert_series_equal(result, expected['one'], check_names=False)
self.assertTrue(result.name is None)
# can pass correct-length arrays
row = a.ix[0].values
result = a.dot(row)
exp = a.dot(a.ix[0])
assert_series_equal(result, exp)
with assertRaisesRegexp(ValueError, 'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
assertRaisesRegexp(ValueError, 'aligned', df.dot, df2)
| gpl-2.0 |
braysia/CellTK | celltk/utils/morphsnakes.py | 1 | 11905 | # -*- coding: utf-8 -*-
"""
morphsnakes
===========
This is a Python implementation of the algorithms introduced in the paper
Márquez-Neila, P., Baumela, L., Álvarez, L., "A morphological approach
to curvature-based evolution of curves and surfaces". IEEE Transactions
on Pattern Analysis and Machine Intelligence (PAMI), 2013.
This implementation is intended to be as brief, understandable and self-contained
as possible. It does not include any enhancement to make it fast or efficient.
Any practical implementation of this algorithm should work only over the
neighbor pixels of the 0.5-levelset, not over all the embedding function,
and perhaps should feature multi-threading or GPU capabilities.
The classes MorphGAC and MorphACWE provide most of the functionality of this
module. They implement the Morphological Geodesic Active Contours and the
Morphological Active Contours without Edges, respectively. See the
aforementioned paper for full details.
See test.py for examples of usage.
"""
__author__ = "P. Márquez Neila <[email protected]>"
from itertools import cycle
import numpy as np
from scipy import ndimage
from scipy.ndimage import binary_dilation, binary_erosion, \
gaussian_filter, gaussian_gradient_magnitude
class fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3), np.array([[0,1,0]]*3), np.flipud(np.eye(3)), np.rot90([[0,1,0]]*3)]
_P3 = [np.zeros((3,3,3)) for i in range(9)]
_P3[0][:,:,1] = 1
_P3[1][:,1,:] = 1
_P3[2][1,:,:] = 1
_P3[3][:,[0,1,2],[0,1,2]] = 1
_P3[4][:,[0,1,2],[2,1,0]] = 1
_P3[5][[0,1,2],:,[0,1,2]] = 1
_P3[6][[0,1,2],:,[2,1,0]] = 1
_P3[7][[0,1,2],[0,1,2],:] = 1
_P3[8][[0,1,2],[2,1,0],:] = 1
_aux = np.zeros((0))
def SI(u):
"""SI operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions (should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for i in range(len(P)):
_aux[i] = binary_erosion(u, P[i])
return _aux.max(0)
def IS(u):
"""IS operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions (should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for i in range(len(P)):
_aux[i] = binary_dilation(u, P[i])
return _aux.min(0)
# Composed operators SI∘IS and IS∘SI; `curvop` alternates between them to
# approximate one step of mean-curvature smoothing.
SIoIS = lambda u: SI(IS(u))
ISoSI = lambda u: IS(SI(u))
curvop = fcycle([SIoIS, ISoSI])
# Stopping factors (function g(I) in the paper).
def gborders(img, alpha=1.0, sigma=1.0):
"""Stopping criterion for image borders."""
# The norm of the gradient.
gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant')
return 1.0/np.sqrt(1.0 + alpha*gradnorm)
def glines(img, sigma=1.0):
"""Stopping criterion for image black lines."""
return gaussian_filter(img, sigma)
class MorphACWE(object):
"""Morphological ACWE based on the Chan-Vese energy functional."""
def __init__(self, data, smoothing=1, lambda1=1, lambda2=1):
"""Create a Morphological ACWE solver.
Parameters
----------
data : ndarray
The image data.
smoothing : scalar
The number of repetitions of the smoothing step (the
curv operator) in each iteration. In other terms,
this is the strength of the smoothing. This is the
parameter µ.
lambda1, lambda2 : scalars
Relative importance of the inside pixels (lambda1)
against the outside pixels (lambda2).
"""
self._u = None
self.smoothing = smoothing
self.lambda1 = lambda1
self.lambda2 = lambda2
self.data = data
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
def step(self):
"""Perform a single step of the morphological Chan-Vese evolution."""
# Assign attributes to local variables for convenience.
u = self._u
if u is None:
raise ValueError("the levelset function is not set (use set_levelset)")
data = self.data
# Determine c0 and c1.
inside = u>0
outside = u<=0
c0 = data[outside].sum() / float(outside.sum())
c1 = data[inside].sum() / float(inside.sum())
# Image attachment.
dres = np.array(np.gradient(u))
abs_dres = np.abs(dres).sum(0)
#aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
aux = abs_dres * (self.lambda1*(data - c1)**2 - self.lambda2*(data - c0)**2)
res = np.copy(u)
res[aux < 0] = 1
res[aux > 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological Chan-Vese method."""
for i in range(iterations):
self.step()
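# Hedged usage sketch added for illustration (not part of the original
# module): a minimal MorphACWE run on a synthetic noisy disc. The image,
# the initial level set and the iteration count are assumptions chosen
# purely for demonstration.
def _demo_morphacwe(shape=(128, 128), center=(64, 64), radius=30, iterations=50):
    """Segment a noisy disc with the morphological Chan-Vese (ACWE) solver."""
    rows, cols = np.mgrid[:shape[0], :shape[1]]
    dist = np.sqrt((rows - center[0])**2 + (cols - center[1])**2)
    # Bright disc on a dark background, plus Gaussian noise.
    img = (dist < radius).astype(float) + 0.2*np.random.randn(*shape)
    macwe = MorphACWE(img, smoothing=1, lambda1=1, lambda2=1)
    # Initialize the level set with a smaller disc inside the object.
    macwe.levelset = (dist < radius//2).astype(float)
    macwe.run(iterations)
    return macwe.levelset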
class MorphGAC(object):
"""Morphological GAC based on the Geodesic Active Contours."""
def __init__(self, data, smoothing=1, threshold=0, balloon=0):
"""Create a Morphological GAC solver.
Parameters
----------
data : array-like
The stopping criterion g(I). See functions gborders and glines.
smoothing : scalar
The number of repetitions of the smoothing step in each
iteration. This is the parameter µ.
threshold : scalar
The threshold that determines which areas are affected
by the morphological balloon. This is the parameter θ.
balloon : scalar
The strength of the morphological balloon. This is the parameter ν.
"""
self._u = None
self._v = balloon
self._theta = threshold
self.smoothing = smoothing
self.set_data(data)
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
def set_balloon(self, v):
self._v = v
self._update_mask()
def set_threshold(self, theta):
self._theta = theta
self._update_mask()
def set_data(self, data):
self._data = data
self._ddata = np.gradient(data)
self._update_mask()
# The structure element for binary dilation and erosion.
self.structure = np.ones((3,)*np.ndim(data))
def _update_mask(self):
"""Pre-compute masks for speed."""
self._threshold_mask = self._data > self._theta
self._threshold_mask_v = self._data > self._theta/np.abs(self._v)
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
data = property(lambda self: self._data,
set_data,
doc="The data that controls the snake evolution (the image or g(I)).")
balloon = property(lambda self: self._v,
set_balloon,
doc="The morphological balloon parameter (ν (nu, not v)).")
threshold = property(lambda self: self._theta,
set_threshold,
doc="The threshold value (θ).")
def step(self):
"""Perform a single step of the morphological snake evolution."""
# Assign attributes to local variables for convenience.
u = self._u
gI = self._data
dgI = self._ddata
theta = self._theta
v = self._v
if u is None:
raise ValueError("the levelset is not set (use set_levelset)")
res = np.copy(u)
# Balloon.
if v > 0:
aux = binary_dilation(u, self.structure)
elif v < 0:
aux = binary_erosion(u, self.structure)
        if v != 0:
res[self._threshold_mask_v] = aux[self._threshold_mask_v]
# Image attachment.
aux = np.zeros_like(res)
dres = np.gradient(res)
for el1, el2 in zip(dgI, dres):
aux += el1*el2
res[aux > 0] = 1
res[aux < 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological snakes method."""
for i in range(iterations):
self.step()
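# Hedged usage sketch added for illustration (not part of the original
# module): a typical MorphGAC pipeline on a 2D image. The stopping function
# g(I) is built with gborders, a small circular level set is used as the
# seed, and the parameter values are assumptions chosen for demonstration.
def _demo_morphgac(img, seed_center, seed_radius=10, iterations=100):
    """Evolve a geodesic active contour from a circular seed on a 2D image."""
    # g(I) is close to 0 on strong edges and close to 1 in flat regions.
    gimg = gborders(img, alpha=1000, sigma=2)
    rows, cols = np.mgrid[:img.shape[0], :img.shape[1]]
    dist = np.sqrt((rows - seed_center[0])**2 + (cols - seed_center[1])**2)
    mgac = MorphGAC(gimg, smoothing=1, threshold=0.3, balloon=1)
    mgac.levelset = (dist < seed_radius).astype(float)
    mgac.run(iterations)
    return mgac.levelset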
def evolve_visual(msnake, levelset=None, num_iters=20, background=None):
"""
Visual evolution of a morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
background : array-like, optional
If given, background will be shown behind the contours instead of
msnake.data.
"""
from matplotlib import pyplot as ppl
if levelset is not None:
msnake.levelset = levelset
# Prepare the visual environment.
fig = ppl.gcf()
fig.clf()
ax1 = fig.add_subplot(1,2,1)
if background is None:
ax1.imshow(msnake.data, cmap=ppl.cm.gray)
else:
ax1.imshow(background, cmap=ppl.cm.gray)
ax1.contour(msnake.levelset, [0.5], colors='r')
ax2 = fig.add_subplot(1,2,2)
ax_u = ax2.imshow(msnake.levelset)
ppl.pause(0.001)
# Iterate.
for i in range(num_iters):
# Evolve.
msnake.step()
# Update figure.
del ax1.collections[0]
ax1.contour(msnake.levelset, [0.5], colors='r')
ax_u.set_data(msnake.levelset)
fig.canvas.draw()
#ppl.pause(0.001)
# Return the last levelset.
return msnake.levelset
def evolve_visual3d(msnake, levelset=None, num_iters=20):
"""
Visual evolution of a three-dimensional morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
"""
from mayavi import mlab
import matplotlib.pyplot as ppl
if levelset is not None:
msnake.levelset = levelset
fig = mlab.gcf()
mlab.clf()
src = mlab.pipeline.scalar_field(msnake.data)
mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', colormap='gray')
cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
@mlab.animate(ui=True)
def anim():
for i in range(num_iters):
msnake.step()
cnt.mlab_source.scalars = msnake.levelset
print("Iteration %s/%s..." % (i + 1, num_iters))
yield
anim()
mlab.show()
# Return the last levelset.
return msnake.levelset | mit |
JosmanPS/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 206 | 1800 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
:ref:`Decision trees <tree>` are
used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and fit the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(y[:, 0], y[:, 1], c="k", label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
RachitKansal/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
    # set missing edges (zero entries) to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/covariance/tests/test_graph_lasso.py | 272 | 5245 | """ Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
| bsd-3-clause |
Intel-tensorflow/tensorflow | tensorflow/python/keras/preprocessing/image.py | 6 | 49546 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=g-import-not-at-top
# pylint: disable=g-classes-have-attributes
"""Set of tools for real-time data augmentation on image data."""
from keras_preprocessing import image
import numpy as np
try:
from scipy import linalg # pylint: disable=unused-import
from scipy import ndimage # pylint: disable=unused-import
except ImportError:
pass
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.preprocessing.image_dataset import image_dataset_from_directory # pylint: disable=unused-import
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.util.tf_export import keras_export
random_rotation = image.random_rotation
random_shift = image.random_shift
random_shear = image.random_shear
random_zoom = image.random_zoom
apply_channel_shift = image.apply_channel_shift
random_channel_shift = image.random_channel_shift
apply_brightness_shift = image.apply_brightness_shift
random_brightness = image.random_brightness
apply_affine_transform = image.apply_affine_transform
@keras_export('keras.preprocessing.image.smart_resize', v1=[])
def smart_resize(x, size, interpolation='bilinear'):
"""Resize images to a target size without aspect ratio distortion.
  TensorFlow image datasets typically yield images that each have a different
  size. However, these images need to be batched before they can be
processed by Keras layers. To be batched, images need to share the same height
and width.
You could simply do:
```python
size = (200, 200)
ds = ds.map(lambda img: tf.image.resize(img, size))
```
However, if you do this, you distort the aspect ratio of your images, since
in general they do not all have the same aspect ratio as `size`. This is
fine in many cases, but not always (e.g. for GANs this can be a problem).
Note that passing the argument `preserve_aspect_ratio=True` to `resize`
will preserve the aspect ratio, but at the cost of no longer respecting the
provided target size. Because `tf.image.resize` doesn't crop images,
your output images will still have different sizes.
This calls for:
```python
size = (200, 200)
ds = ds.map(lambda img: smart_resize(img, size))
```
Your output images will actually be `(200, 200)`, and will not be distorted.
Instead, the parts of the image that do not fit within the target size
get cropped out.
The resizing process is:
1. Take the largest centered crop of the image that has the same aspect ratio
as the target size. For instance, if `size=(200, 200)` and the input image has
size `(340, 500)`, we take a crop of `(340, 340)` centered along the width.
2. Resize the cropped image to the target size. In the example above,
we resize the `(340, 340)` crop to `(200, 200)`.
Args:
x: Input image or batch of images (as a tensor or NumPy array).
Must be in format `(height, width, channels)` or
`(batch_size, height, width, channels)`.
size: Tuple of `(height, width)` integer. Target size.
interpolation: String, interpolation to use for resizing.
Defaults to `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`,
`area`, `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.
Returns:
Array with shape `(size[0], size[1], channels)`. If the input image was a
NumPy array, the output is a NumPy array, and if it was a TF tensor,
the output is a TF tensor.
"""
if len(size) != 2:
raise ValueError('Expected `size` to be a tuple of 2 integers, '
'but got: %s' % (size,))
img = ops.convert_to_tensor_v2_with_dispatch(x)
if img.shape.rank is not None:
if img.shape.rank < 3 or img.shape.rank > 4:
raise ValueError(
'Expected an image array with shape `(height, width, channels)`, '
'or `(batch_size, height, width, channels)` but '
'got input with incorrect rank, of shape %s' % (img.shape,))
shape = array_ops.shape(img)
if img.shape.rank == 4:
height, width = shape[1], shape[2]
static_num_channels = img.shape[-1]
else:
height, width = shape[0], shape[1]
target_height, target_width = size
crop_height = math_ops.cast(
math_ops.cast(width * target_height, 'float32') / target_width, 'int32')
crop_width = math_ops.cast(
math_ops.cast(height * target_width, 'float32') / target_height, 'int32')
# Set back to input height / width if crop_height / crop_width is not smaller.
crop_height = math_ops.minimum(height, crop_height)
crop_width = math_ops.minimum(width, crop_width)
crop_box_hstart = math_ops.cast(
math_ops.cast(height - crop_height, 'float32') / 2, 'int32')
crop_box_wstart = math_ops.cast(
math_ops.cast(width - crop_width, 'float32') / 2, 'int32')
if img.shape.rank == 4:
crop_box_start = array_ops.stack([0, crop_box_hstart, crop_box_wstart, 0])
crop_box_size = array_ops.stack([-1, crop_height, crop_width, -1])
else:
crop_box_start = array_ops.stack([crop_box_hstart, crop_box_wstart, 0])
crop_box_size = array_ops.stack([crop_height, crop_width, -1])
img = array_ops.slice(img, crop_box_start, crop_box_size)
img = image_ops.resize_images_v2(
images=img,
size=size,
method=interpolation)
if img.shape.rank == 4:
# Apparent bug in resize_images_v2 may cause shape to be lost
img.set_shape((None, None, None, static_num_channels))
if isinstance(x, np.ndarray):
return img.numpy()
return img
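# Hedged usage sketch added for illustration (not part of the original API):
# demonstrates the crop-then-resize behavior described in the docstring of
# `smart_resize` above. The (340, 500) input size mirrors the docstring
# example and is an arbitrary assumption.
def _smart_resize_example():
  """Center-crop a (340, 500, 3) image to (340, 340), then resize to (200, 200)."""
  img = np.zeros((340, 500, 3), dtype='float32')
  out = smart_resize(img, size=(200, 200))
  # For a NumPy input, the output is a NumPy array of shape (200, 200, 3).
  assert out.shape == (200, 200, 3)
  return out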
@keras_export('keras.utils.array_to_img',
'keras.preprocessing.image.array_to_img')
def array_to_img(x, data_format=None, scale=True, dtype=None):
"""Converts a 3D Numpy array to a PIL Image instance.
Usage:
```python
from PIL import Image
img = np.random.random(size=(100, 100, 3))
pil_img = tf.keras.preprocessing.image.array_to_img(img)
```
Args:
x: Input data, in any form that can be converted to a Numpy array.
data_format: Image data format, can be either "channels_first" or
"channels_last". Defaults to `None`, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
it defaults to "channels_last").
scale: Whether to rescale the image such that minimum and maximum values
are 0 and 255 respectively. Defaults to `True`.
      dtype: Dtype to use. Defaults to `None`, in which case the global setting
`tf.keras.backend.floatx()` is used (unless you changed it, it defaults
to "float32")
Returns:
A PIL Image instance.
Raises:
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if data_format is None:
data_format = backend.image_data_format()
kwargs = {}
if 'dtype' in tf_inspect.getfullargspec(image.array_to_img)[0]:
if dtype is None:
dtype = backend.floatx()
kwargs['dtype'] = dtype
return image.array_to_img(x, data_format=data_format, scale=scale, **kwargs)
@keras_export('keras.utils.img_to_array',
'keras.preprocessing.image.img_to_array')
def img_to_array(img, data_format=None, dtype=None):
"""Converts a PIL Image instance to a Numpy array.
Usage:
```python
from PIL import Image
img_data = np.random.random(size=(100, 100, 3))
img = tf.keras.preprocessing.image.array_to_img(img_data)
array = tf.keras.preprocessing.image.img_to_array(img)
```
Args:
img: Input PIL Image instance.
data_format: Image data format, can be either "channels_first" or
"channels_last". Defaults to `None`, in which case the global setting
`tf.keras.backend.image_data_format()` is used (unless you changed it,
it defaults to "channels_last").
      dtype: Dtype to use. Defaults to `None`, in which case the global setting
`tf.keras.backend.floatx()` is used (unless you changed it, it defaults
to "float32")
Returns:
A 3D Numpy array.
Raises:
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = backend.image_data_format()
kwargs = {}
if 'dtype' in tf_inspect.getfullargspec(image.img_to_array)[0]:
if dtype is None:
dtype = backend.floatx()
kwargs['dtype'] = dtype
return image.img_to_array(img, data_format=data_format, **kwargs)
@keras_export('keras.utils.save_img',
'keras.preprocessing.image.save_img')
def save_img(path,
x,
data_format=None,
file_format=None,
scale=True,
**kwargs):
"""Saves an image stored as a Numpy array to a path or file object.
Args:
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
"""
if data_format is None:
data_format = backend.image_data_format()
image.save_img(path,
x,
data_format=data_format,
file_format=file_format,
scale=scale, **kwargs)
@keras_export('keras.utils.load_img',
'keras.preprocessing.image.load_img')
def load_img(path, grayscale=False, color_mode='rgb', target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
Usage:
```
image = tf.keras.preprocessing.image.load_img(image_path)
input_arr = tf.keras.preprocessing.image.img_to_array(image)
input_arr = np.array([input_arr]) # Convert single image to a batch.
predictions = model.predict(input_arr)
```
Args:
path: Path to image file.
grayscale: DEPRECATED use `color_mode="grayscale"`.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb".
The desired image format.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
Returns:
A PIL Image instance.
Raises:
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
return image.load_img(path, grayscale=grayscale, color_mode=color_mode,
target_size=target_size, interpolation=interpolation)
@keras_export('keras.preprocessing.image.Iterator')
class Iterator(image.Iterator, data_utils.Sequence):
pass
@keras_export('keras.preprocessing.image.DirectoryIterator')
class DirectoryIterator(image.DirectoryIterator, Iterator): # pylint: disable=inconsistent-mro
"""Iterator capable of reading images from a directory on disk.
Args:
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`.
Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
- `"binary"`: binary targets (if there are only two classes),
- `"categorical"`: categorical targets,
- `"sparse"`: integer targets,
- `"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
- `None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
dtype: Dtype to use for generated arrays.
"""
def __init__(self, directory, image_data_generator,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest',
dtype=None):
if data_format is None:
data_format = backend.image_data_format()
kwargs = {}
if 'dtype' in tf_inspect.getfullargspec(
image.ImageDataGenerator.__init__)[0]:
if dtype is None:
dtype = backend.floatx()
kwargs['dtype'] = dtype
super(DirectoryIterator, self).__init__(
directory, image_data_generator,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
data_format=data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation,
**kwargs)
@keras_export('keras.preprocessing.image.NumpyArrayIterator')
class NumpyArrayIterator(image.NumpyArrayIterator, Iterator):
"""Iterator yielding data from a Numpy array.
Args:
x: Numpy array of input data or tuple.
If tuple, the second elements is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
dtype: Dtype to use for the generated arrays.
"""
def __init__(self, x, y, image_data_generator,
batch_size=32,
shuffle=False,
sample_weight=None,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
dtype=None):
if data_format is None:
data_format = backend.image_data_format()
kwargs = {}
if 'dtype' in tf_inspect.getfullargspec(
image.NumpyArrayIterator.__init__)[0]:
if dtype is None:
dtype = backend.floatx()
kwargs['dtype'] = dtype
super(NumpyArrayIterator, self).__init__(
x, y, image_data_generator,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset,
**kwargs)
class DataFrameIterator(image.DataFrameIterator, Iterator): # pylint: disable=inconsistent-mro
"""Iterator capable of reading images from a directory on disk as a dataframe.
Args:
dataframe: Pandas dataframe containing the filepaths relative to
`directory` (or absolute paths if `directory` is None) of the images in
a string column. It should include other column/s depending on the
`class_mode`:
- if `class_mode` is `"categorical"` (default value) it must include
the `y_col` column with the class/es of each image. Values in
column can be string/list/tuple if a single class or list/tuple if
multiple classes.
- if `class_mode` is `"binary"` or `"sparse"` it must include the
given `y_col` column with class values as strings.
- if `class_mode` is `"raw"` or `"multi_output"` it should contain the
columns specified in `y_col`.
- if `class_mode` is `"input"` or `None` no extra column is needed.
directory: string, path to the directory to read images from. If `None`,
data in `x_col` column should be absolute paths.
image_data_generator: Instance of `ImageDataGenerator` to use for random
transformations and normalization. If None, no transformations and
normalizations are made.
x_col: string, column in `dataframe` that contains the filenames (or
absolute paths if `directory` is `None`).
y_col: string or list, column/s in `dataframe` that has the target data.
weight_col: string, column in `dataframe` that contains the sample
weights. Default: `None`.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`. Color mode to read
images.
classes: Optional list of strings, classes to use (e.g. `["dogs",
"cats"]`). If None, all classes in `y_col` will be used.
class_mode: one of "binary", "categorical", "input", "multi_output",
"raw", "sparse" or None. Default: "categorical".
Mode for yielding the targets:
- `"binary"`: 1D numpy array of binary labels,
- `"categorical"`: 2D numpy array of one-hot encoded labels. Supports
multi-label output.
- `"input"`: images identical to input images (mainly used to work
with autoencoders),
- `"multi_output"`: list with the values of the different columns,
- `"raw"`: numpy array of values in `y_col` column(s),
- `"sparse"`: 1D numpy array of integer labels,
- `None`, no targets are returned (the generator will only yield
batches of image data, which is useful to use in `model.predict()`).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures being yielded,
in a viewable format. This is useful for visualizing the random
transformations being applied, for debugging purposes.
save_prefix: String prefix to use for saving sample images (if
`save_to_dir` is set).
save_format: Format to use for saving sample images (if `save_to_dir` is
set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are "nearest", "bilinear", and "bicubic". If PIL version 1.1.3
or newer is installed, "lanczos" is also supported. If PIL version 3.4.0
or newer is installed, "box" and "hamming" are also supported. By
default, "nearest" is used.
dtype: Dtype to use for the generated arrays.
validate_filenames: Boolean, whether to validate image filenames in
`x_col`. If `True`, invalid images will be ignored. Disabling this option
can lead to speed-up in the instantiation of this class. Default: `True`.
"""
def __init__(
self,
dataframe,
directory=None,
image_data_generator=None,
x_col='filename',
y_col='class',
weight_col=None,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
data_format='channels_last',
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest',
dtype='float32',
validate_filenames=True):
super(DataFrameIterator, self).__init__(
dataframe=dataframe,
directory=directory,
image_data_generator=image_data_generator,
x_col=x_col,
y_col=y_col,
weight_col=weight_col,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
data_format=data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset,
interpolation=interpolation,
dtype=dtype,
validate_filenames=validate_filenames
)
@keras_export('keras.preprocessing.image.ImageDataGenerator')
class ImageDataGenerator(image.ImageDataGenerator):
"""Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
Args:
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats
in the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats
in the interval [-1.0, +1.0).
brightness_range: Tuple or list of two floats. Range for picking
a brightness shift value from.
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(after applying all other transformations).
preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
dtype: Dtype to use for the generated arrays.
Raises:
ValueError: If the value of the argument, `data_format` is other than
`"channels_last"` or `"channels_first"`.
ValueError: If the value of the argument, `validation_split` > 1
or `validation_split` < 0.
Examples:
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = utils.to_categorical(y_train, num_classes)
y_test = utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
validation_split=0.2)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit(datagen.flow(x_train, y_train, batch_size=32,
subset='training'),
validation_data=datagen.flow(x_train, y_train,
batch_size=8, subset='validation'),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0,
width_shift_range=0.,
height_shift_range=0.,
brightness_range=None,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None,
validation_split=0.0,
dtype=None):
if data_format is None:
data_format = backend.image_data_format()
kwargs = {}
if 'dtype' in tf_inspect.getfullargspec(
image.ImageDataGenerator.__init__)[0]:
if dtype is None:
dtype = backend.floatx()
kwargs['dtype'] = dtype
super(ImageDataGenerator, self).__init__(
featurewise_center=featurewise_center,
samplewise_center=samplewise_center,
featurewise_std_normalization=featurewise_std_normalization,
samplewise_std_normalization=samplewise_std_normalization,
zca_whitening=zca_whitening,
zca_epsilon=zca_epsilon,
rotation_range=rotation_range,
width_shift_range=width_shift_range,
height_shift_range=height_shift_range,
brightness_range=brightness_range,
shear_range=shear_range,
zoom_range=zoom_range,
channel_shift_range=channel_shift_range,
fill_mode=fill_mode,
cval=cval,
horizontal_flip=horizontal_flip,
vertical_flip=vertical_flip,
rescale=rescale,
preprocessing_function=preprocessing_function,
data_format=data_format,
validation_split=validation_split,
**kwargs)
def flow(self,
x,
y=None,
batch_size=32,
shuffle=True,
sample_weight=None,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None):
"""Takes data & label arrays, generates batches of augmented data.
Args:
x: Input data. Numpy array of rank 4 or a tuple. If tuple, the first
element should contain the images and the second element another numpy
array or a list of numpy arrays that gets passed to the output without
any modifications. Can be used to feed the model miscellaneous data
along with the images. In case of grayscale data, the channels axis of
the image array should have value 1, in case of RGB data, it should
have value 3, and in case of RGBA data, it should have value 4.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None). This allows you to optionally
specify a directory to which to save the augmented pictures being
generated (useful for visualizing what you are doing).
save_prefix: Str (default: `''`). Prefix to use for filenames of saved
pictures (only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif",
"tif", "jpg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
Returns:
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
Raises:
ValueError: If the Value of the argument, `subset` is other than
"training" or "validation".
"""
return NumpyArrayIterator(
x,
y,
self,
batch_size=batch_size,
shuffle=shuffle,
sample_weight=sample_weight,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset)
def flow_from_directory(self,
directory,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False,
subset=None,
interpolation='nearest'):
"""Takes the path to a directory & generates batches of augmented data.
Args:
directory: string, path to the target directory. It should contain one
subdirectory per class. Any PNG, JPG, BMP, PPM or TIF images inside
each of the subdirectories directory tree will be included in the
generator. See [this script](
https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`, defaults to `(256,
256)`. The dimensions to which all images found will be resized.
color_mode: One of "grayscale", "rgb", "rgba". Default: "rgb". Whether
the images will be converted to have 1, 3, or 4 channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None. If not provided, the list
of classes will be automatically inferred from the subdirectory
names/structure under `directory`, where each subdirectory will be
treated as a different class (and the order of the classes, which
will map to the label indices, will be alphanumeric). The
dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
- "sparse" will be 1D integer labels,
- "input" will be images identical to input images (mainly used to
work with autoencoders).
- If None, no labels are returned (the generator will only yield
batches of image data, which is useful to use with
`model.predict()`).
Please note that in case of class_mode None, the data still needs to
reside in a subdirectory of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True) If set to False,
sorts the data in alphanumeric order.
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None). This allows you to optionally
specify a directory to which to save the augmented pictures being
generated (useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures (only
relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif",
"tif", "jpg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version
1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL
version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also
supported. By default, `"nearest"` is used.
Returns:
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
return DirectoryIterator(
directory,
self,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links,
subset=subset,
interpolation=interpolation)
def flow_from_dataframe(self,
dataframe,
directory=None,
x_col='filename',
y_col='class',
weight_col=None,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
subset=None,
interpolation='nearest',
validate_filenames=True,
**kwargs):
"""Takes the dataframe and the path to a directory + generates batches.
The generated batches contain augmented/normalized data.
A simple tutorial can be found [here](
http://bit.ly/keras_flow_from_dataframe).
Args:
dataframe: Pandas dataframe containing the filepaths relative to
`directory` (or absolute paths if `directory` is None) of the images
in a string column. It should include other column/s
depending on the `class_mode`:
- if `class_mode` is `"categorical"` (default value) it must include
the `y_col` column with the class/es of each image. Values in
column can be string/list/tuple if a single class or list/tuple if
multiple classes.
- if `class_mode` is `"binary"` or `"sparse"` it must include the
given `y_col` column with class values as strings.
- if `class_mode` is `"raw"` or `"multi_output"` it should contain
the columns specified in `y_col`.
- if `class_mode` is `"input"` or `None` no extra column is needed.
directory: string, path to the directory to read images from. If `None`,
data in `x_col` column should be absolute paths.
x_col: string, column in `dataframe` that contains the filenames (or
absolute paths if `directory` is `None`).
y_col: string or list, column/s in `dataframe` that has the target data.
weight_col: string, column in `dataframe` that contains the sample
weights. Default: `None`.
target_size: tuple of integers `(height, width)`, default: `(256, 256)`.
The dimensions to which all images found will be resized.
color_mode: one of "grayscale", "rgb", "rgba". Default: "rgb". Whether
the images will be converted to have 1, 3, or 4 color channels.
classes: optional list of classes (e.g. `['dogs', 'cats']`). Default is
None. If not provided, the list of classes will be automatically
inferred from `y_col` (and the order of the classes, which will map to
the label indices, will be alphanumeric). The dictionary containing the
mapping from class names to class indices can be obtained via the
attribute `class_indices`.
class_mode: one of "binary", "categorical", "input", "multi_output",
"raw", sparse" or None. Default: "categorical".
Mode for yielding the targets:
- `"binary"`: 1D numpy array of binary labels,
- `"categorical"`: 2D numpy array of one-hot encoded labels.
Supports multi-label output.
- `"input"`: images identical to input images (mainly used to work
with autoencoders),
- `"multi_output"`: list with the values of the different columns,
- `"raw"`: numpy array of values in `y_col` column(s),
- `"sparse"`: 1D numpy array of integer labels,
- `None`, no targets are returned (the generator will only yield
batches of image data, which is useful to use in
`model.predict()`).
batch_size: size of the batches of data (default: 32).
shuffle: whether to shuffle the data (default: True)
seed: optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None). This allows you to optionally
specify a directory to which to save the augmented pictures being
generated (useful for visualizing what you are doing).
save_prefix: str. Prefix to use for filenames of saved pictures (only
relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg", "bmp", "pdf", "ppm", "gif",
"tif", "jpg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image. Supported
methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version
1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL
version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also
supported. By default, `"nearest"` is used.
validate_filenames: Boolean, whether to validate image filenames in
`x_col`. If `True`, invalid images will be ignored. Disabling this
option can lead to speed-up in the execution of this function.
Defaults to `True`.
**kwargs: legacy arguments for raising deprecation warnings.
Returns:
A `DataFrameIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
"""
if 'has_ext' in kwargs:
tf_logging.warning(
'has_ext is deprecated, filenames in the dataframe have '
'to match the exact filenames on disk.', DeprecationWarning)
if 'sort' in kwargs:
tf_logging.warning(
'sort is deprecated, batches will be created in the '
'same order as the filenames provided if shuffle '
'is set to False.', DeprecationWarning)
if class_mode == 'other':
tf_logging.warning(
'`class_mode` "other" is deprecated, please use '
'`class_mode` "raw".', DeprecationWarning)
class_mode = 'raw'
if 'drop_duplicates' in kwargs:
tf_logging.warning(
'drop_duplicates is deprecated, you can drop duplicates '
'by using the pandas.DataFrame.drop_duplicates method.',
DeprecationWarning)
return DataFrameIterator(
dataframe,
directory,
self,
x_col=x_col,
y_col=y_col,
weight_col=weight_col,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
subset=subset,
interpolation=interpolation,
validate_filenames=validate_filenames)
keras_export('keras.preprocessing.image.random_rotation')(random_rotation)
keras_export('keras.preprocessing.image.random_shift')(random_shift)
keras_export('keras.preprocessing.image.random_shear')(random_shear)
keras_export('keras.preprocessing.image.random_zoom')(random_zoom)
keras_export(
'keras.preprocessing.image.apply_channel_shift')(apply_channel_shift)
keras_export(
'keras.preprocessing.image.random_channel_shift')(random_channel_shift)
keras_export(
'keras.preprocessing.image.apply_brightness_shift')(apply_brightness_shift)
keras_export('keras.preprocessing.image.random_brightness')(random_brightness)
keras_export(
'keras.preprocessing.image.apply_affine_transform')(apply_affine_transform)
| apache-2.0 |
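# Illustrative sketch (not part of the file above): a minimal use of
# ImageDataGenerator.flow_from_dataframe, since the docstring above describes it in
# detail but the embedded examples only cover .flow and .flow_from_directory.
# The dataframe contents and the 'data/images' directory are hypothetical; the call
# assumes those image files actually exist on disk.
import pandas as pd
from tensorflow.keras.preprocessing.image import ImageDataGenerator

df = pd.DataFrame({
    'filename': ['cat001.jpg', 'cat002.jpg', 'dog001.jpg', 'dog002.jpg'],
    'class': ['cats', 'cats', 'dogs', 'dogs'],
})
datagen = ImageDataGenerator(rescale=1. / 255)
train_gen = datagen.flow_from_dataframe(
    df,
    directory='data/images',   # hypothetical path containing the listed files
    x_col='filename',
    y_col='class',
    target_size=(150, 150),
    class_mode='categorical',
    batch_size=2)
x_batch, y_batch = next(train_gen)  # x_batch shape (2, 150, 150, 3), y_batch one-hot labels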
thomas-bottesch/fcl | examples/python/kmeans/plotting/calculations_evaluation.py | 1 | 3784 | from __future__ import print_function
import fcl
import os
import matplotlib
import matplotlib.pyplot as plt
from os.path import abspath, join, dirname
from fcl import kmeans
from fcl.datasets import load_sector_dataset, load_usps_dataset
def plot_overall_duration_subplot(ax, algorithm_results):
# create subplot showing the overall duration of the algorithm
i = 0
tick_pos = []
for algorithm in algorithm_results:
ax.bar(i, algorithm_results[algorithm]['duration_kmeans'] / 1000)
tick_pos.append(i + 0.5)
i += 1
ax.set_xticks(tick_pos, minor=False)
ax.set_xticklabels(algorithm_results.keys(), fontdict=None, minor=False, rotation=45)
ax.set_ylabel('time / s')
ax.set_title("Overall duration")
def plot_fulldist_calcs_subplot(ax, algorithm_results):
# create subplot showing the number of full distance calculations for every iteration
for algorithm in algorithm_results:
full_distance_calcs = algorithm_results[algorithm]['iteration_full_distance_calcs']
if 'iteration_bv_calcs' in algorithm_results[algorithm]:
for i in range(algorithm_results[algorithm]['no_iterations']):
# block vector calculations also have certain costs
# these costs are added here to the full distance calculations
full_distance_calcs[i] += algorithm_results[algorithm]['iteration_bv_calcs'][i] \
* algorithm_results[algorithm]['additional_params']['bv_annz']
ax.plot(range(algorithm_results[algorithm]['no_iterations']),
full_distance_calcs, '-', linewidth=3, label = algorithm)
ax.legend()
ax.grid(True)
ax.set_xlabel('iteration')
ax.set_ylabel('full distance calculations')
ax.set_title("Full distance calculations per iteration")
def plot_iteration_duration_subplot(ax, algorithm_results):
# create subplot showing the number of full distance calculations for every iteration
for algorithm in algorithm_results:
print(algorithm
, algorithm_results[algorithm]['iteration_durations']
, sum(algorithm_results[algorithm]['iteration_durations'])
, len(algorithm_results[algorithm]['iteration_durations'])
, algorithm_results[algorithm]['no_iterations']
, list(map(lambda x: x / 1000.0, algorithm_results[algorithm]['iteration_durations'])))
ax.plot(range(algorithm_results[algorithm]['no_iterations']),
list(map(lambda x: x / 1000.0, algorithm_results[algorithm]['iteration_durations'])), '-', linewidth=3, label = algorithm)
ax.legend()
ax.grid(True)
ax.set_xlabel('iteration')
ax.set_ylabel('time / s')
ax.set_title("Duration of every iter")
def do_evaluations(dataset_path):
print("Doing evaluations for dataset %s"%dataset_path)
algorithm_results = {'bv_kmeans': None, 'yinyang': None}
for algorithm in algorithm_results:
print("Executing k-means with algorithm: %s"%algorithm)
km = kmeans.KMeans(n_jobs=1, no_clusters=300, algorithm=algorithm, init='random', seed = 0, verbose = False)
km.fit(dataset_path)
algorithm_results[algorithm] = km.get_tracked_params()
f, (a0, a1, a2) = plt.subplots(1,3, gridspec_kw = {'width_ratios':[1, 4, 4]}, figsize=(18, 8))
plot_overall_duration_subplot(a0, algorithm_results)
plot_iteration_duration_subplot(a1, algorithm_results)
plot_fulldist_calcs_subplot(a2, algorithm_results)
f.tight_layout()
destination_filename = join(dirname( __file__ ), "calculations_evaluation.png")
plt.savefig(destination_filename)
print("plot was saved in the current folder to: %s"%destination_filename)
if __name__ == "__main__":
ds_folder = abspath(join(dirname( __file__ ), os.pardir, os.pardir, os.pardir, 'datasets'))
do_evaluations(load_sector_dataset(ds_folder))
| mit |
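# Illustrative sketch (not part of the file above): the plotting helpers in
# calculations_evaluation.py expect, for each algorithm, a dict like the one
# returned by km.get_tracked_params() in do_evaluations(). The keys below are the
# ones the helpers read; the numbers are made up purely to show the expected shape,
# and the snippet assumes the three plot_* helpers above are in scope (imported or
# defined in the same session).
import matplotlib.pyplot as plt

fake_results = {
    'bv_kmeans': {'duration_kmeans': 4200.0,          # milliseconds
                  'no_iterations': 3,
                  'iteration_durations': [2000.0, 1400.0, 800.0],
                  'iteration_full_distance_calcs': [900, 400, 150]},
    'yinyang':   {'duration_kmeans': 6100.0,
                  'no_iterations': 3,
                  'iteration_durations': [2500.0, 2100.0, 1500.0],
                  'iteration_full_distance_calcs': [1200, 700, 300]},
}
fig, (a0, a1, a2) = plt.subplots(1, 3, figsize=(12, 4))
plot_overall_duration_subplot(a0, fake_results)
plot_iteration_duration_subplot(a1, fake_results)
plot_fulldist_calcs_subplot(a2, fake_results)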
nicolas998/Op_Radar | 06_Codigos/viejos/Cron_Figura_SimExtrapol.py | 2 | 1343 | #!/usr/bin/env python
import os
import datetime as dt
import pandas as pd
from multiprocessing import Pool
import numpy as np
#-------------------------------------------------------------------
#LOCAL FUNCTIONS
#-------------------------------------------------------------------
ruta_qsim = '/home/renea998/Simulaciones/extrapolated/'
ruta_ejec = '/home/renea998/scripts/Figuras_Qsim.py'
ruta_Figuras = '/home/renea998/FigExtrapolaciones/'
servidor = '[email protected]:/var/www/nicolas/FigExtrapolated/'
#-------------------------------------------------------------------
#GENERATE THE LISTS OF COMMANDS TO CREATE AND SEND THE FIGURES
#-------------------------------------------------------------------
Lista = range(1,291)
ListaSelect = np.random.choice(Lista,50)
ListaSelect = ListaSelect.tolist()
ListaSelect.insert(0,1)
Lc1 = []
for i in ListaSelect:
Lc1.append( ruta_ejec+' '+ruta_qsim+' '+str(i)+' '+ruta_Figuras+'Qsim_nodo_'+str(i)+'.png')
for i in Lc1:
os.system(i)
#-------------------------------------------------------------------
#EXECUTE THE COMMANDS IN PARALLEL
#-------------------------------------------------------------------
#try:
# p = Pool(processes = 10)
# p.map(os.system, Lc1)
#finally:
# p.close()
# p.join()
comando = 'scp '+ruta_Figuras+'Qsim_nodo_*.png '+servidor
os.system(comando)
| gpl-3.0 |
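# Illustrative sketch (not part of the file above): the script above builds the
# command list Lc1 and runs it serially with os.system; its commented-out block
# hints at a multiprocessing.Pool variant. A minimal, self-contained version of that
# idea, using harmless placeholder commands instead of the real plotting commands:
import os
from multiprocessing import Pool

commands = ['echo figure_%d' % i for i in range(5)]  # placeholders for the real commands

if __name__ == '__main__':
    pool = Pool(processes=2)
    try:
        pool.map(os.system, commands)  # run the shell commands in parallel
    finally:
        pool.close()
        pool.join()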
lenovor/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
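# Illustrative sketch (not part of the file above): a quick sanity check that the
# RBF-kernel NuSVC separates the XOR data used in the example. The data generation
# mirrors the example; the printed accuracy is only indicative.
import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
X = rng.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
clf = svm.NuSVC().fit(X, Y)
print(clf.score(X, Y))                          # training accuracy on the XOR data
print(clf.predict([[1.0, 1.0], [-1.0, 1.0]]))   # same-sign vs. opposite-sign point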
xuewei4d/scikit-learn | sklearn/compose/tests/test_target.py | 10 | 12393 | import numpy as np
import pytest
from sklearn.base import clone
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.dummy import DummyRegressor
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import assert_no_warnings
from sklearn.preprocessing import FunctionTransformer
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression, OrthogonalMatchingPursuit
from sklearn import datasets
from sklearn.compose import TransformedTargetRegressor
friedman = datasets.make_friedman1(random_state=0)
def test_transform_target_regressor_error():
X, y = friedman
# provide a transformer and functions at the same time
regr = TransformedTargetRegressor(regressor=LinearRegression(),
transformer=StandardScaler(),
func=np.exp, inverse_func=np.log)
with pytest.raises(ValueError,
match="'transformer' and functions"
" 'func'/'inverse_func' cannot both be set."):
regr.fit(X, y)
# fit with sample_weight with a regressor which does not support it
sample_weight = np.ones((y.shape[0],))
regr = TransformedTargetRegressor(regressor=OrthogonalMatchingPursuit(),
transformer=StandardScaler())
with pytest.raises(TypeError, match=r"fit\(\) got an unexpected "
"keyword argument 'sample_weight'"):
regr.fit(X, y, sample_weight=sample_weight)
# func is given but inverse_func is not
regr = TransformedTargetRegressor(func=np.exp)
with pytest.raises(ValueError, match="When 'func' is provided, "
"'inverse_func' must also be provided"):
regr.fit(X, y)
def test_transform_target_regressor_invertible():
X, y = friedman
regr = TransformedTargetRegressor(regressor=LinearRegression(),
func=np.sqrt, inverse_func=np.log,
check_inverse=True)
assert_warns_message(UserWarning, "The provided functions or transformer"
" are not strictly inverse of each other.",
regr.fit, X, y)
regr = TransformedTargetRegressor(regressor=LinearRegression(),
func=np.sqrt, inverse_func=np.log)
regr.set_params(check_inverse=False)
assert_no_warnings(regr.fit, X, y)
def _check_standard_scaled(y, y_pred):
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
assert_allclose((y - y_mean) / y_std, y_pred)
def _check_shifted_by_one(y, y_pred):
assert_allclose(y + 1, y_pred)
def test_transform_target_regressor_functions():
X, y = friedman
regr = TransformedTargetRegressor(regressor=LinearRegression(),
func=np.log, inverse_func=np.exp)
y_pred = regr.fit(X, y).predict(X)
# check the transformer output
y_tran = regr.transformer_.transform(y.reshape(-1, 1)).squeeze()
assert_allclose(np.log(y), y_tran)
assert_allclose(y, regr.transformer_.inverse_transform(
y_tran.reshape(-1, 1)).squeeze())
assert y.shape == y_pred.shape
assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X)))
# check the regressor output
lr = LinearRegression().fit(X, regr.func(y))
assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel())
def test_transform_target_regressor_functions_multioutput():
X = friedman[0]
y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
regr = TransformedTargetRegressor(regressor=LinearRegression(),
func=np.log, inverse_func=np.exp)
y_pred = regr.fit(X, y).predict(X)
# check the transformer output
y_tran = regr.transformer_.transform(y)
assert_allclose(np.log(y), y_tran)
assert_allclose(y, regr.transformer_.inverse_transform(y_tran))
assert y.shape == y_pred.shape
assert_allclose(y_pred, regr.inverse_func(regr.regressor_.predict(X)))
# check the regressor output
lr = LinearRegression().fit(X, regr.func(y))
assert_allclose(regr.regressor_.coef_.ravel(), lr.coef_.ravel())
@pytest.mark.parametrize("X,y", [friedman,
(friedman[0],
np.vstack((friedman[1],
friedman[1] ** 2 + 1)).T)])
def test_transform_target_regressor_1d_transformer(X, y):
# All transformer in scikit-learn expect 2D data. FunctionTransformer with
# validate=False lift this constraint without checking that the input is a
# 2D vector. We check the consistency of the data shape using a 1D and 2D y
# array.
transformer = FunctionTransformer(func=lambda x: x + 1,
inverse_func=lambda x: x - 1)
regr = TransformedTargetRegressor(regressor=LinearRegression(),
transformer=transformer)
y_pred = regr.fit(X, y).predict(X)
assert y.shape == y_pred.shape
# consistency forward transform
y_tran = regr.transformer_.transform(y)
_check_shifted_by_one(y, y_tran)
assert y.shape == y_pred.shape
# consistency inverse transform
assert_allclose(y, regr.transformer_.inverse_transform(
y_tran).squeeze())
# consistency of the regressor
lr = LinearRegression()
transformer2 = clone(transformer)
lr.fit(X, transformer2.fit_transform(y))
y_lr_pred = lr.predict(X)
assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
assert_allclose(regr.regressor_.coef_, lr.coef_)
@pytest.mark.parametrize("X,y", [friedman,
(friedman[0],
np.vstack((friedman[1],
friedman[1] ** 2 + 1)).T)])
def test_transform_target_regressor_2d_transformer(X, y):
# Check consistency with transformer accepting only 2D array and a 1D/2D y
# array.
transformer = StandardScaler()
regr = TransformedTargetRegressor(regressor=LinearRegression(),
transformer=transformer)
y_pred = regr.fit(X, y).predict(X)
assert y.shape == y_pred.shape
# consistency forward transform
if y.ndim == 1: # create a 2D array and squeeze results
y_tran = regr.transformer_.transform(y.reshape(-1, 1)).squeeze()
else:
y_tran = regr.transformer_.transform(y)
_check_standard_scaled(y, y_tran)
assert y.shape == y_pred.shape
# consistency inverse transform
assert_allclose(y, regr.transformer_.inverse_transform(
y_tran).squeeze())
# consistency of the regressor
lr = LinearRegression()
transformer2 = clone(transformer)
if y.ndim == 1: # create a 2D array and squeeze results
lr.fit(X, transformer2.fit_transform(y.reshape(-1, 1)).squeeze())
else:
lr.fit(X, transformer2.fit_transform(y))
y_lr_pred = lr.predict(X)
assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
assert_allclose(regr.regressor_.coef_, lr.coef_)
def test_transform_target_regressor_2d_transformer_multioutput():
# Check consistency with transformer accepting only 2D array and a 2D y
# array.
X = friedman[0]
y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
transformer = StandardScaler()
regr = TransformedTargetRegressor(regressor=LinearRegression(),
transformer=transformer)
y_pred = regr.fit(X, y).predict(X)
assert y.shape == y_pred.shape
# consistency forward transform
y_tran = regr.transformer_.transform(y)
_check_standard_scaled(y, y_tran)
assert y.shape == y_pred.shape
# consistency inverse transform
assert_allclose(y, regr.transformer_.inverse_transform(
y_tran).squeeze())
# consistency of the regressor
lr = LinearRegression()
transformer2 = clone(transformer)
lr.fit(X, transformer2.fit_transform(y))
y_lr_pred = lr.predict(X)
assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
assert_allclose(regr.regressor_.coef_, lr.coef_)
def test_transform_target_regressor_multi_to_single():
X = friedman[0]
y = np.transpose([friedman[1], (friedman[1] ** 2 + 1)])
def func(y):
out = np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
return out[:, np.newaxis]
def inverse_func(y):
return y
tt = TransformedTargetRegressor(func=func, inverse_func=inverse_func,
check_inverse=False)
tt.fit(X, y)
y_pred_2d_func = tt.predict(X)
assert y_pred_2d_func.shape == (100, 1)
# force that the function only return a 1D array
def func(y):
return np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
tt = TransformedTargetRegressor(func=func, inverse_func=inverse_func,
check_inverse=False)
tt.fit(X, y)
y_pred_1d_func = tt.predict(X)
assert y_pred_1d_func.shape == (100, 1)
assert_allclose(y_pred_1d_func, y_pred_2d_func)
class DummyCheckerArrayTransformer(TransformerMixin, BaseEstimator):
def fit(self, X, y=None):
assert isinstance(X, np.ndarray)
return self
def transform(self, X):
assert isinstance(X, np.ndarray)
return X
def inverse_transform(self, X):
assert isinstance(X, np.ndarray)
return X
class DummyCheckerListRegressor(DummyRegressor):
def fit(self, X, y, sample_weight=None):
assert isinstance(X, list)
return super().fit(X, y, sample_weight)
def predict(self, X):
assert isinstance(X, list)
return super().predict(X)
def test_transform_target_regressor_ensure_y_array():
# check that the target ``y`` passed to the transformer will always be a
# numpy array. Similarly, if ``X`` is passed as a list, we check that the
# predictor receive as it is.
X, y = friedman
tt = TransformedTargetRegressor(transformer=DummyCheckerArrayTransformer(),
regressor=DummyCheckerListRegressor(),
check_inverse=False)
tt.fit(X.tolist(), y.tolist())
tt.predict(X.tolist())
with pytest.raises(AssertionError):
tt.fit(X, y.tolist())
with pytest.raises(AssertionError):
tt.predict(X)
class DummyTransformer(TransformerMixin, BaseEstimator):
"""Dummy transformer which count how many time fit was called."""
def __init__(self, fit_counter=0):
self.fit_counter = fit_counter
def fit(self, X, y=None):
self.fit_counter += 1
return self
def transform(self, X):
return X
def inverse_transform(self, X):
return X
@pytest.mark.parametrize("check_inverse", [False, True])
def test_transform_target_regressor_count_fit(check_inverse):
# regression test for gh-issue #11618
# check that we only call a single time fit for the transformer
X, y = friedman
ttr = TransformedTargetRegressor(
transformer=DummyTransformer(), check_inverse=check_inverse
)
ttr.fit(X, y)
assert ttr.transformer_.fit_counter == 1
class DummyRegressorWithExtraFitParams(DummyRegressor):
def fit(self, X, y, sample_weight=None, check_input=True):
# in the test below we force this to False to make sure it is
# actually passed to the regressor
assert not check_input
return super().fit(X, y, sample_weight)
def test_transform_target_regressor_pass_fit_parameters():
X, y = friedman
regr = TransformedTargetRegressor(
regressor=DummyRegressorWithExtraFitParams(),
transformer=DummyTransformer()
)
regr.fit(X, y, check_input=False)
assert regr.transformer_.fit_counter == 1
def test_transform_target_regressor_route_pipeline():
X, y = friedman
regr = TransformedTargetRegressor(
regressor=DummyRegressorWithExtraFitParams(),
transformer=DummyTransformer()
)
estimators = [
('normalize', StandardScaler()), ('est', regr)
]
pip = Pipeline(estimators)
pip.fit(X, y, **{'est__check_input': False})
assert regr.transformer_.fit_counter == 1
| bsd-3-clause |
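# Illustrative sketch (not part of the test file above): the minimal usage pattern
# these tests exercise -- wrap a regressor so that the target is transformed before
# fitting and inverse-transformed at prediction time.
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_friedman1

X, y = make_friedman1(random_state=0)
regr = TransformedTargetRegressor(regressor=LinearRegression(),
                                  transformer=StandardScaler())
regr.fit(X, y)
print(regr.predict(X[:3]))          # predictions come back in the original y scale
print(regr.regressor_.coef_.shape)  # underlying regressor fitted on the scaled target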
phdowling/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
crichardson17/starburst_atlas | SEDs/GenevaRotationMetallicity/cont.py | 1 | 22868 | #Begin by importing everything necessary
import csv
import matplotlib.pyplot as plt
from numpy import *
# The user must set this to the directory and file that needs to be read in.
inputfile1 = 'Rotation_cont_002_1.con'
inputfile2 = 'Rotation_cont_002_2.con'
inputfile3 = 'Rotation_cont_002_3.con'
inputfile4 = 'Rotation_cont_002_4.con'
inputfile5 = 'Rotation_cont_002_5.con'
inputfile6 = 'Rotation_cont_002_6.con'
inputfile29 = 'Rotation_cont_002_7.con'
inputfile30 = 'Rotation_cont_002_8.con'
inputfile7 = 'Rotation_cont_008_1.con'
inputfile8 = 'Rotation_cont_008_2.con'
inputfile9 = 'Rotation_cont_008_3.con'
inputfile10 = 'Rotation_cont_008_4.con'
inputfile11 = 'Rotation_cont_008_5.con'
inputfile12 = 'Rotation_cont_008_6.con'
inputfile25 = 'Rotation_cont_008_7.con'
inputfile26 = 'Rotation_cont_008_8.con'
#inputfile29 = 'Rotation_cont_008_10.con'
#it does reach steady state after 8 Myr
inputfile13 = 'Rotation_inst_002_1.con'
inputfile14 = 'Rotation_inst_002_2.con'
inputfile15 = 'Rotation_inst_002_3.con'
inputfile16 = 'Rotation_inst_002_4.con'
inputfile17 = 'Rotation_inst_002_5.con'
inputfile18 = 'Rotation_inst_002_6.con'
inputfile31 = 'Rotation_inst_002_7.con'
inputfile32 = 'Rotation_inst_002_8.con'
inputfile19 = 'Rotation_inst_008_1.con'
inputfile20 = 'Rotation_inst_008_2.con'
inputfile21 = 'Rotation_inst_008_3.con'
inputfile22 = 'Rotation_inst_008_4.con'
inputfile23 = 'Rotation_inst_008_5.con'
inputfile24 = 'Rotation_inst_008_6.con'
inputfile27 = 'Rotation_inst_008_7.con'
inputfile28 = 'Rotation_inst_008_8.con'
inputfile33 = 'padova_inst_1.con'
inputfile34 = 'padova_inst_2.con'
inputfile35 = 'padova_inst_3.con'
inputfile36 = 'padova_inst_4.con'
inputfile37 = 'padova_inst_5.con'
inputfile38 = 'padova_inst_6.con'
inputfile39 = 'padova_inst_7.con'
inputfile40 = 'padova_inst_8.con'
inputfile41 = 'padova_cont_1.con'
inputfile42 = 'padova_cont_2.con'
inputfile43 = 'padova_cont_3.con'
inputfile44 = 'padova_cont_4.con'
inputfile45 = 'padova_cont_5.con'
inputfile46 = 'padova_cont_6.con'
inputfile47 = 'padova_cont_7.con'
inputfile48 = 'padova_cont_8.con'
#-------------------------------------------------------------
datastarburst1 = [];
with open(inputfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst1.append(row);
datastarburst1 = asarray(datastarburst1)
datastarburst2 = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst2.append(row);
datastarburst2 = asarray(datastarburst2)
datastarburst3 = [];
with open(inputfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst3.append(row);
datastarburst3 = asarray(datastarburst3)
datastarburst4 = [];
with open(inputfile4, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst4.append(row);
datastarburst4 = asarray(datastarburst4)
datastarburst5 = [];
with open(inputfile5, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst5.append(row);
datastarburst5 = asarray(datastarburst5)
datastarburst6 = [];
with open(inputfile6, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst6.append(row);
datastarburst6 = asarray(datastarburst6)
#-------------------------------------------------------------
datastarburst7 = [];
with open(inputfile7, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst7.append(row);
datastarburst7 = asarray(datastarburst7)
datastarburst8 = [];
with open(inputfile8, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst8.append(row);
datastarburst8 = asarray(datastarburst8)
datastarburst9 = [];
with open(inputfile9, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst9.append(row);
datastarburst9 = asarray(datastarburst9)
datastarburst10 = [];
with open(inputfile10, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst10.append(row);
datastarburst10 = asarray(datastarburst10)
datastarburst11 = [];
with open(inputfile11, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst11.append(row);
datastarburst11= asarray(datastarburst11)
datastarburst12 = [];
with open(inputfile12, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst12.append(row);
datastarburst12 = asarray(datastarburst12)
#-------------------------------------------------------------
datastarburst13 = [];
with open(inputfile13, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst13.append(row);
datastarburst13 = asarray(datastarburst13)
datastarburst14 = [];
with open(inputfile14, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst14.append(row);
datastarburst14 = asarray(datastarburst14)
datastarburst15 = [];
with open(inputfile15, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst15.append(row);
datastarburst15 = asarray(datastarburst15)
datastarburst16 = [];
with open(inputfile16, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst16.append(row);
datastarburst16 = asarray(datastarburst16)
datastarburst17 = [];
with open(inputfile17, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst17.append(row);
datastarburst17 = asarray(datastarburst17)
datastarburst18 = [];
with open(inputfile18, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst18.append(row);
datastarburst18 = asarray(datastarburst18)
#-------------------------------------------------------------
datastarburst19 = [];
with open(inputfile19, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst19.append(row);
datastarburst19 = asarray(datastarburst19)
datastarburst20 = [];
with open(inputfile20, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst20.append(row);
datastarburst20 = asarray(datastarburst20)
datastarburst21 = [];
with open(inputfile21, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst21.append(row);
datastarburst21 = asarray(datastarburst21)
datastarburst22 = [];
with open(inputfile22, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst22.append(row);
datastarburst22 = asarray(datastarburst22)
datastarburst23 = [];
with open(inputfile23, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst23.append(row);
datastarburst23 = asarray(datastarburst23)
datastarburst24 = [];
with open(inputfile24, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst24.append(row);
datastarburst24 = asarray(datastarburst24)
#---
datastarburst25 = [];
with open(inputfile25, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst25.append(row);
datastarburst25 = asarray(datastarburst25)
datastarburst26 = [];
with open(inputfile26, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst26.append(row);
datastarburst26 = asarray(datastarburst26)
datastarburst27 = [];
with open(inputfile27, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst27.append(row);
datastarburst27 = asarray(datastarburst27)
datastarburst28 = [];
with open(inputfile28, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst28.append(row);
datastarburst28 = asarray(datastarburst28)
#----
datastarburst29 = [];
with open(inputfile29, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst29.append(row);
datastarburst29 = asarray(datastarburst29)
datastarburst30 = [];
with open(inputfile30, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst30.append(row);
datastarburst30 = asarray(datastarburst30)
datastarburst31 = [];
with open(inputfile31, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst31.append(row);
datastarburst31 = asarray(datastarburst31)
datastarburst32 = [];
with open(inputfile32, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst32.append(row);
datastarburst32 = asarray(datastarburst32)
'''
datastarburst29 = [];
with open(inputfile29, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst29.append(row);
datastarburst29 = asarray(datastarburst29)
'''
datastarburst33 = [];
with open(inputfile33, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst33.append(row);
datastarburst33 = asarray(datastarburst33)
datastarburst34 = [];
with open(inputfile34, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst34.append(row);
datastarburst34 = asarray(datastarburst34)
datastarburst35 = [];
with open(inputfile35, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst35.append(row);
datastarburst35 = asarray(datastarburst35)
datastarburst36 = [];
with open(inputfile36, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst36.append(row);
datastarburst36 = asarray(datastarburst36)
datastarburst37 = [];
with open(inputfile37, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst37.append(row);
datastarburst37 = asarray(datastarburst37)
datastarburst38 = [];
with open(inputfile38, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst38.append(row);
datastarburst38 = asarray(datastarburst38)
datastarburst39 = [];
with open(inputfile39, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst39.append(row);
datastarburst39 = asarray(datastarburst39)
datastarburst40 = [];
with open(inputfile40, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst40.append(row);
datastarburst40 = asarray(datastarburst40)
datastarburst41 = [];
with open(inputfile41, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst41.append(row);
datastarburst41 = asarray(datastarburst41)
datastarburst42 = [];
with open(inputfile42, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst42.append(row);
datastarburst42 = asarray(datastarburst42)
datastarburst43 = [];
with open(inputfile43, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst43.append(row);
datastarburst43 = asarray(datastarburst43)
datastarburst44 = [];
with open(inputfile44, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst44.append(row);
datastarburst44 = asarray(datastarburst44)
datastarburst45 = [];
with open(inputfile45, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst45.append(row);
datastarburst45 = asarray(datastarburst45)
datastarburst46 = [];
with open(inputfile46, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst46.append(row);
datastarburst46 = asarray(datastarburst46)
datastarburst47 = [];
with open(inputfile47, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst47.append(row);
datastarburst47 = asarray(datastarburst47)
datastarburst48 = [];
with open(inputfile48, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
datastarburst48.append(row);
datastarburst48 = asarray(datastarburst48)
#-------------------------------------------------------------
# Extract x and y from the first two columns of the array.
x = double(datastarburst1[:4022,0])
#convert wavelength to photon energy in eV (comment out the following loop to keep the original x units):
for i in range(len(x)):
x[i] = 1.240/x[i]
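#Note on the conversion above (illustrative, assuming the first column is wavelength in microns):
#E[eV] ~ 1.2398 / lambda[micron], so e.g. 0.124 micron (1240 Angstrom) maps to roughly 10 eV,
#which matches the 10-100 eV axis range used in the plots below.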
y1 = double(datastarburst1[:4022,1])
y2 = double(datastarburst2[:4022,1])
y3 = double(datastarburst3[:4022,1])
y4 = double(datastarburst4[:4022,1])
y5 = double(datastarburst5[:4022,1])
y6 = double(datastarburst6[:4022,1])
y7 = double(datastarburst7[:4049,1])
y8 = double(datastarburst8[:4049,1])
y9 = double(datastarburst9[:4049,1])
y10 = double(datastarburst10[:4049,1])
y11 = double(datastarburst11[:4049,1])
y12 = double(datastarburst12[:4049,1])
y13 = double(datastarburst13[:4022,1])
y14 = double(datastarburst14[:4022,1])
y15 = double(datastarburst15[:4022,1])
y16 = double(datastarburst16[:4022,1])
y17 = double(datastarburst17[:4022,1])
y18 = double(datastarburst18[:4022,1])
y19 = double(datastarburst19[:3963,1])
y20 = double(datastarburst20[:3963,1])
y21 = double(datastarburst21[:3963,1])
y22 = double(datastarburst22[:3963,1])
y23 = double(datastarburst23[:3963,1])
y24 = double(datastarburst24[:3963,1])
y25 = double(datastarburst25[:4049,1])
y26 = double(datastarburst26[:4049,1])
y27 = double(datastarburst27[:3963,1])
y28 = double(datastarburst28[:3963,1])
y29 = double(datastarburst29[:4022,1])
y30 = double(datastarburst30[:4022,1])
y31 = double(datastarburst31[:4022,1])
y32 = double(datastarburst32[:4022,1])
y33 = double(datastarburst31[:4049,1])
y34 = double(datastarburst32[:4049,1])
y35 = double(datastarburst33[:4049,1])
y36 = double(datastarburst34[:4049,1])
y37 = double(datastarburst35[:4049,1])
y38 = double(datastarburst36[:4049,1])
y39 = double(datastarburst31[:4049,1])
y40 = double(datastarburst32[:4049,1])
y41 = double(datastarburst33[:4049,1])
y42 = double(datastarburst34[:4049,1])
y43 = double(datastarburst35[:4049,1])
y44 = double(datastarburst36[:4049,1])
y45 = double(datastarburst31[:4049,1])
y46 = double(datastarburst32[:4049,1])
y47 = double(datastarburst33[:4049,1])
y48 = double(datastarburst34[:4049,1])
#y29 = double(datastarburst29[:4049,1])
'''
y25 = double(datastarburst25[:3950,1])
y26 = double(datastarburst26[:3950,1])
y27 = double(datastarburst27[:3950,1])
y28 = double(datastarburst28[:3950,1])
y29 = double(datastarburst29[:3950,1])
y30 = double(datastarburst30[:3950,1])
y31 = double(datastarburst31[:4049,1])
y32 = double(datastarburst32[:4049,1])
y33 = double(datastarburst33[:4049,1])
y34 = double(datastarburst34[:4049,1])
y35 = double(datastarburst35[:4049,1])
y36 = double(datastarburst36[:4049,1])
#Some of the model outputs were slightly shorter (the end temperature may have been reached sooner in those runs); they differ by only ~100 rows, so the arrays are truncated to a common length above.
#-------------------------------------------------------------
# Plot x vs. y in 4 subplots
fig = plt.figure(1)
#sp1 = plt.subplot(321) #rows, columns, location of this particular plot
p1 = plt.plot(x,y1, 'k', linewidth=1.5, label="1 Myr")
p2 = plt.plot(x,y2, 'c', linewidth=1.5, label="2 Myr")
p3 = plt.plot(x,y3, 'y', linewidth=1.5, label="3 Myr")
p4 = plt.plot(x,y4, 'r', linewidth=1.5, label="4 Myr")
p5 = plt.plot(x,y5, 'g', linewidth=1.5, label="5 Myr")
p6 = plt.plot(x,y6, 'b', linewidth=1.5, label="6 Myr")
p29 = plt.plot(x,y29, 'm', linewidth=1.5, label="7 Myr")
p30 = plt.plot(x,y30, '.75', linewidth=1.5, label="8 Myr")
plt.xlim(10**1, 10**2)
plt.ylim(10**1, 10**6)
plt.title('Starburst Geneva Track Continuous with Rotation- 0.002 Metallicity', fontsize=30)
#plt.ylabel(r'$4 \pi \nu J_\nu$ (erg $s^{-1} cm ^ {-2})$', fontsize=10)
#plt.tick_params(axis='x', bottom='off',labelbottom='off')
#plt.xscale('log', fontsize=10)
#plt.yscale('log', fontsize= 10)
#plt.legend(prop={'size':7})
#--------------------------------------------------------
fig = plt.figure(1)
#sp2 = plt.subplot(322)
p7 = plt.plot(x,y7, 'k', linewidth=1.5, label="1 Myr")
p8 = plt.plot(x,y8, 'c', linewidth=1.5, label="2 Myr")
p9 = plt.plot(x,y9, 'y', linewidth=1.5, label="3 Myr")
p10 = plt.plot(x,y10, 'r', linewidth=1.5, label="4 Myr")
p11 = plt.plot(x,y11, 'g', linewidth=1.5, label="5 Myr")
p12 = plt.plot(x,y12, 'b', linewidth=1.5, label="6 Myr")
p25 = plt.plot(x,y25, 'm', linewidth=1.5, label="7 Myr")
p26 = plt.plot(x,y26, '.75', linewidth=1.5, label="8 Myr")
#p29 = plt.plot(x,y29, '.5', linewidth=1.5, label="10 Myr")
plt.xlim(10**1, 10**2)
plt.ylim(10**0, 10**6)
#plt.tick_params(axis='x', bottom='off',labelbottom='off')
plt.title('Starburst Geneva Track Continuous with Rotation- 0.008 Metallicity', fontsize=30)
#plt.xscale('log')
#plt.yscale('log')
#plt.legend(prop={'size':7})
#--------------------------------------------------------
#sp3 = plt.subplot(323)
fig = plt.figure(1)
p13 = plt.plot(x,y13, 'k', linewidth=1.5, label="1 mil yrs")
p14 = plt.plot(x,y14, 'c', linewidth=1.5, label="2 mil yrs")
p15 = plt.plot(x,y15, 'y', linewidth=1.5, label="3 mil yrs")
p16 = plt.plot(x,y16, 'r', linewidth=1.5, label="4 mil yrs")
p17 = plt.plot(x,y17, 'g', linewidth=1.5, label="5 mil yrs")
p18 = plt.plot(x,y18, 'b', linewidth=1.5, label="6 mil yrs")
p30 = plt.plot(x,y30, 'm', linewidth=1.5, label="7 Myr")
p31 = plt.plot(x,y31, '0.75', linewidth=1.5, label="8 Myr")
plt.xlim(10**1, 10**2)
plt.ylim(10**0, 10**6)
plt.title('Starburst Geneva Track Instantaneous with Rotation- 0.002 Metallicity', fontsize=30)
#plt.xlabel(r'$\mu m$', fontsize=10)
#plt.ylabel(r'$4 \pi \nu J_\nu$ (erg $s^{-1} cm ^ {-2})$', fontsize=10)
#plt.xscale('log')
#plt.yscale('log')
#plt.legend(prop={'size':7})
#--------------------------------------------------------
#sp4 = plt.subplot(324)
fig = plt.figure(1)
p19 = plt.plot(x,y19, 'k', linewidth=1.5, label="1 Myr")
p20 = plt.plot(x,y20, 'c', linewidth=1.5, label="2 Myr")
p21 = plt.plot(x,y21, 'y', linewidth=1.5, label="3 Myr")
p22 = plt.plot(x,y22, 'r', linewidth=1.5, label="4 Myr")
p23 = plt.plot(x,y23, 'g', linewidth=1.5, label="5 Myr")
p24 = plt.plot(x,y24, 'b', linewidth=1.5, label="6 Myr")
p27 = plt.plot(x,y27, 'm', linewidth=1.5, label="7 Myr")
p28 = plt.plot(x,y28, '.75', linewidth=1.5, label="8 Myr")
plt.xlim(10**1, 10**2)
plt.ylim(10**0, 10**6)
plt.title('Starburst Geneva Track Instantaneous with Rotation- 0.008 Metallicity', fontsize=30)
#plt.xlabel(r'$\mu m$', fontsize=10)
#plt.tick_params(axis='x', bottom='off',labelbottom='off')
#plt.xscale('log')
#plt.yscale('log')
#plt.legend(prop={'size':7})
#-------------------------------------------------------------
# Plot x vs. y in 4 subplots
'''
fig = plt.figure(1)
#sp1 = plt.subplot(325) #rows, columns, location of this particular plot
p33 = plt.plot(x,y33, 'k', linewidth=.75, label="1 Myr")
p34 = plt.plot(x,y34, 'c', linewidth=.75, label="2 Myr")
p35 = plt.plot(x,y35, 'y', linewidth=.75, label="3 Myr")
p36 = plt.plot(x,y36, 'r', linewidth=.75, label="4 Myr")
p37 = plt.plot(x,y37, 'g', linewidth=.75, label="5 Myr")
p38 = plt.plot(x,y38, 'b', linewidth=.75, label="6 Myr")
p39 = plt.plot(x,y39, 'm', linewidth=.75, label="7 Myr")
p40 = plt.plot(x,y40, '0.75', linewidth=.75, label="8 Myr")
#plt.xlim(10**1, 10**2)
#plt.ylim(10**0, 10**6)
plt.title('Starburst Padova Track Instantaneous', fontsize=10)
#plt.ylabel(r'$4 \pi \nu J_\nu$ (erg $s^{-1} cm ^ {-2})$', fontsize=10)
#plt.xlabel('eV')
#plt.xscale('log')
#plt.yscale('log')
#plt.legend(prop={'size':7})
'''
#-------------------------------------------------------------
# Plot x vs. y in 4 subplots
fig = plt.figure(1)
#sp1 = plt.subplot(326) #rows, columns, location of this particular plot
p1 = plt.plot(x,y31, 'k', linewidth=2, label="1 Myr")
p2 = plt.plot(x,y32, 'c', linewidth=2, label="2 Myr")
p3 = plt.plot(x,y33, 'y', linewidth=2, label="3 Myr")
p4 = plt.plot(x,y34, 'r', linewidth=2, label="4 Myr")
p5 = plt.plot(x,y35, 'g', linewidth=2, label="5 Myr")
p6 = plt.plot(x,y36, 'b', linewidth=2, label="6 Myr")
'''
plt.xlim(10**1, 10**2)
plt.ylim(10**0, 10**6)
plt.yticks(fontsize=25)
plt.xticks(fontsize=25)
#ax = plt.gca()
#ax.tick_params(which='both', direction='out', length=10, width=1)
#plt.title('Padova Track Instantaneous SFH', fontsize=40)
plt.xlabel('Energy (eV)', fontsize = 25)
plt.ylabel(r'$4 \pi \nu J_\nu$ (erg $\mathrm{s}^{-1} \mathrm{cm} ^ {-2})$', fontsize=25)
plt.xscale('log')
plt.yscale('log')
plt.legend(prop={'size':30})
#-------------------------------------------------------------
#plt.savefig('continuum_padovacont.eps', figsize=(30,10))
#plt.clf()
plt.show()
| gpl-2.0 |
amolkahat/pandas | pandas/io/common.py | 1 | 19678 | """Common IO api utilities"""
import codecs
import csv
import mmap
import os
import zipfile
from contextlib import closing, contextmanager
import pandas.core.common as com
from pandas import compat
from pandas.compat import BytesIO, StringIO, string_types, text_type
from pandas.core.dtypes.common import is_file_like, is_number
# compat
from pandas.errors import ( # noqa
DtypeWarning, EmptyDataError, ParserError, ParserWarning
)
from pandas.io.formats.printing import pprint_thing
# gh-12665: Alias for now and remove later.
CParserError = ParserError
# common NA values
# no longer excluding inf representations
# '1.#INF','-1.#INF', '1.#INF000000',
_NA_VALUES = {'-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A',
'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan',
'-nan', ''}
if compat.PY3:
from urllib.request import urlopen, pathname2url
_urlopen = urlopen
from urllib.parse import urlparse as parse_url
from urllib.parse import (uses_relative, uses_netloc, uses_params,
urlencode, urljoin)
from urllib.error import URLError
from http.client import HTTPException # noqa
else:
from urllib2 import urlopen as _urlopen
from urllib import urlencode, pathname2url # noqa
from urlparse import urlparse as parse_url
from urlparse import uses_relative, uses_netloc, uses_params, urljoin
from urllib2 import URLError # noqa
from httplib import HTTPException # noqa
from contextlib import contextmanager, closing # noqa
from functools import wraps # noqa
# @wraps(_urlopen)
@contextmanager
def urlopen(*args, **kwargs):
with closing(_urlopen(*args, **kwargs)) as f:
yield f
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
class BaseIterator(object):
"""Subclass this and provide a "__next__()" method to obtain an iterator.
Useful only when the object being iterated is non-reusable (e.g. OK for a
parser, not for an in-memory table, yes for its iterator)."""
def __iter__(self):
return self
def __next__(self):
raise com.AbstractMethodError(self)
if not compat.PY3:
BaseIterator.next = lambda self: self.__next__()
def _is_url(url):
"""Check to see if a URL has a valid protocol.
Parameters
----------
url : str or unicode
Returns
-------
isurl : bool
If `url` has a valid protocol return True otherwise False.
"""
try:
return parse_url(url).scheme in _VALID_URLS
except Exception:
return False
def _expand_user(filepath_or_buffer):
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, string_types):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
def _validate_header_arg(header):
if isinstance(header, bool):
raise TypeError("Passing a bool to header is invalid. "
"Use header=None for no header or "
"header=int or list-like of ints to specify "
"the row(s) making up the column names")
def _stringify_path(filepath_or_buffer):
"""Attempt to convert a path-like object to a string.
Parameters
----------
filepath_or_buffer : object to be converted
Returns
-------
str_filepath_or_buffer : maybe a string version of the object
Notes
-----
Objects supporting the fspath protocol (python 3.6+) are coerced
according to its __fspath__ method.
For backwards compatibility with older pythons, pathlib.Path and
py.path objects are specially coerced.
Any other object is passed through unchanged, which includes bytes,
strings, buffers, or anything else that's not even path-like.
"""
try:
import pathlib
_PATHLIB_INSTALLED = True
except ImportError:
_PATHLIB_INSTALLED = False
try:
from py.path import local as LocalPath
_PY_PATH_INSTALLED = True
except ImportError:
_PY_PATH_INSTALLED = False
if hasattr(filepath_or_buffer, '__fspath__'):
return filepath_or_buffer.__fspath__()
if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path):
return text_type(filepath_or_buffer)
if _PY_PATH_INSTALLED and isinstance(filepath_or_buffer, LocalPath):
return filepath_or_buffer.strpath
return filepath_or_buffer
def is_s3_url(url):
"""Check for an s3, s3n, or s3a url"""
try:
return parse_url(url).scheme in ['s3', 's3n', 's3a']
except Exception:
return False
def is_gcs_url(url):
"""Check for a gcs url"""
try:
return parse_url(url).scheme in ['gcs', 'gs']
except Exception:
return False
def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
compression=None, mode=None):
"""
If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.
Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
or buffer
encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
mode : str, optional
Returns
-------
    tuple of (a filepath or buffer or S3File instance,
              encoding : str,
              compression : str,
              should_close : bool)
"""
filepath_or_buffer = _stringify_path(filepath_or_buffer)
if _is_url(filepath_or_buffer):
req = _urlopen(filepath_or_buffer)
content_encoding = req.headers.get('Content-Encoding', None)
if content_encoding == 'gzip':
# Override compression based on Content-Encoding header
compression = 'gzip'
reader = BytesIO(req.read())
req.close()
return reader, encoding, compression, True
if is_s3_url(filepath_or_buffer):
from pandas.io import s3
return s3.get_filepath_or_buffer(filepath_or_buffer,
encoding=encoding,
compression=compression,
mode=mode)
if is_gcs_url(filepath_or_buffer):
from pandas.io import gcs
return gcs.get_filepath_or_buffer(filepath_or_buffer,
encoding=encoding,
compression=compression,
mode=mode)
if isinstance(filepath_or_buffer, (compat.string_types,
compat.binary_type,
mmap.mmap)):
return _expand_user(filepath_or_buffer), None, compression, False
if not is_file_like(filepath_or_buffer):
msg = "Invalid file path or buffer object type: {_type}"
raise ValueError(msg.format(_type=type(filepath_or_buffer)))
return filepath_or_buffer, None, compression, False
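# Illustrative behaviour of the function above: a plain local path passes through
# as (path, None, compression, False), while an http(s) URL is downloaded into a
# BytesIO buffer and returned as (buffer, encoding, compression, True), the final
# True signalling that the caller should close the temporary buffer.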
def file_path_to_url(path):
"""
converts an absolute native path to a FILE URL.
Parameters
----------
path : a path in native format
Returns
-------
a valid FILE URL
"""
return urljoin('file:', pathname2url(path))
_compression_to_extension = {
'gzip': '.gz',
'bz2': '.bz2',
'zip': '.zip',
'xz': '.xz',
}
def _infer_compression(filepath_or_buffer, compression):
"""
Get the compression method for filepath_or_buffer. If compression='infer',
the inferred compression method is returned. Otherwise, the input
compression method is returned unchanged, unless it's invalid, in which
case an error is raised.
Parameters
----------
filepath_or_buffer :
a path (str) or buffer
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
If 'infer' and `filepath_or_buffer` is path-like, then detect
compression from the following extensions: '.gz', '.bz2', '.zip',
or '.xz' (otherwise no compression).
Returns
-------
string or None :
compression method
Raises
------
ValueError on invalid compression specified
"""
# No compression has been explicitly specified
if compression is None:
return None
# Infer compression
if compression == 'infer':
# Convert all path types (e.g. pathlib.Path) to strings
filepath_or_buffer = _stringify_path(filepath_or_buffer)
if not isinstance(filepath_or_buffer, compat.string_types):
# Cannot infer compression of a buffer, assume no compression
return None
# Infer compression from the filename/URL extension
for compression, extension in _compression_to_extension.items():
if filepath_or_buffer.endswith(extension):
return compression
return None
# Compression has been specified. Check that it's valid
if compression in _compression_to_extension:
return compression
msg = 'Unrecognized compression type: {}'.format(compression)
valid = ['infer', None] + sorted(_compression_to_extension)
msg += '\nValid compression types are {}'.format(valid)
raise ValueError(msg)
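# Examples of the inference rules above (illustrative):
#   _infer_compression('data.csv.gz', 'infer')  -> 'gzip'
#   _infer_compression('data.csv', 'infer')     -> None  (no known extension)
#   _infer_compression(some_buffer, 'infer')    -> None  (buffers are never inferred)
#   _infer_compression('data.csv', 'zip')       -> 'zip' (explicit values are validated)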
def _get_handle(path_or_buf, mode, encoding=None, compression=None,
memory_map=False, is_text=True):
"""
Get file handle for given path/buffer and mode.
Parameters
----------
path_or_buf :
a path (str) or buffer
mode : str
mode to open path_or_buf with
encoding : str or None
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default None
If 'infer' and `filepath_or_buffer` is path-like, then detect
compression from the following extensions: '.gz', '.bz2', '.zip',
or '.xz' (otherwise no compression).
memory_map : boolean, default False
See parsers._parser_params for more information.
is_text : boolean, default True
whether file/buffer is in text format (csv, json, etc.), or in binary
mode (pickle, etc.)
Returns
-------
f : file-like
A file-like object
handles : list of file-like objects
A list of file-like object that were opened in this function.
"""
try:
from s3fs import S3File
need_text_wrapping = (BytesIO, S3File)
except ImportError:
need_text_wrapping = (BytesIO,)
handles = list()
f = path_or_buf
# Convert pathlib.Path/py.path.local or string
path_or_buf = _stringify_path(path_or_buf)
is_path = isinstance(path_or_buf, compat.string_types)
if is_path:
compression = _infer_compression(path_or_buf, compression)
if compression:
if compat.PY2 and not is_path and encoding:
msg = 'compression with encoding is not yet supported in Python 2'
raise ValueError(msg)
# GZ Compression
if compression == 'gzip':
import gzip
if is_path:
f = gzip.open(path_or_buf, mode)
else:
f = gzip.GzipFile(fileobj=path_or_buf)
# BZ Compression
elif compression == 'bz2':
import bz2
if is_path:
f = bz2.BZ2File(path_or_buf, mode)
elif compat.PY2:
# Python 2's bz2 module can't take file objects, so have to
# run through decompress manually
f = StringIO(bz2.decompress(path_or_buf.read()))
path_or_buf.close()
else:
f = bz2.BZ2File(path_or_buf)
# ZIP Compression
elif compression == 'zip':
zf = BytesZipFile(path_or_buf, mode)
# Ensure the container is closed as well.
handles.append(zf)
if zf.mode == 'w':
f = zf
elif zf.mode == 'r':
zip_names = zf.namelist()
if len(zip_names) == 1:
f = zf.open(zip_names.pop())
elif len(zip_names) == 0:
raise ValueError('Zero files found in ZIP file {}'
.format(path_or_buf))
else:
raise ValueError('Multiple files found in ZIP file.'
' Only one file per ZIP: {}'
.format(zip_names))
# XZ Compression
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path_or_buf, mode)
# Unrecognized Compression
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
handles.append(f)
elif is_path:
if compat.PY2:
# Python 2
mode = "wb" if mode == "w" else mode
f = open(path_or_buf, mode)
elif encoding:
# Python 3 and encoding
f = open(path_or_buf, mode, encoding=encoding, newline="")
elif is_text:
# Python 3 and no explicit encoding
f = open(path_or_buf, mode, errors='replace', newline="")
else:
# Python 3 and binary mode
f = open(path_or_buf, mode)
handles.append(f)
# in Python 3, convert BytesIO or fileobjects passed with an encoding
if (compat.PY3 and is_text and
(compression or isinstance(f, need_text_wrapping))):
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
handles.append(f)
if memory_map and hasattr(f, 'fileno'):
try:
g = MMapWrapper(f)
f.close()
f = g
except Exception:
# we catch any errors that may have occurred
# because that is consistent with the lower-level
# functionality of the C engine (pd.read_csv), so
# leave the file handler as is then
pass
return f, handles
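# Typical call pattern (illustrative sketch): the caller keeps the returned
# handles and closes them once reading is finished, e.g.
#   f, handles = _get_handle('data.csv.gz', 'r', encoding='utf-8',
#                            compression='infer')
#   try:
#       ...  # read from f
#   finally:
#       for h in handles:
#           h.close()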
class BytesZipFile(zipfile.ZipFile, BytesIO):
"""
Wrapper for standard library class ZipFile and allow the returned file-like
handle to accept byte strings via `write` method.
BytesIO provides attributes of file-like object and ZipFile.writestr writes
bytes strings into a member of the archive.
"""
# GH 17778
def __init__(self, file, mode, compression=zipfile.ZIP_DEFLATED, **kwargs):
if mode in ['wb', 'rb']:
mode = mode.replace('b', '')
super(BytesZipFile, self).__init__(file, mode, compression, **kwargs)
def write(self, data):
super(BytesZipFile, self).writestr(self.filename, data)
@property
def closed(self):
return self.fp is None
class MMapWrapper(BaseIterator):
"""
Wrapper for the Python's mmap class so that it can be properly read in
by Python's csv.reader class.
Parameters
----------
f : file object
File object to be mapped onto memory. Must support the 'fileno'
method or have an equivalent attribute
"""
def __init__(self, f):
self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
def __getattr__(self, name):
return getattr(self.mmap, name)
def __iter__(self):
return self
def __next__(self):
newline = self.mmap.readline()
# readline returns bytes, not str, in Python 3,
# but Python's CSV reader expects str, so convert
# the output to str before continuing
if compat.PY3:
newline = compat.bytes_to_str(newline)
# mmap doesn't raise if reading past the allocated
# data but instead returns an empty string, so raise
# if that is returned
if newline == '':
raise StopIteration
return newline
if not compat.PY3:
MMapWrapper.next = lambda self: self.__next__()
class UTF8Recoder(BaseIterator):
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def read(self, bytes=-1):
return self.reader.read(bytes).encode("utf-8")
def readline(self):
return self.reader.readline().encode("utf-8")
def next(self):
return next(self.reader).encode("utf-8")
if compat.PY3: # pragma: no cover
def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
# ignore encoding
return csv.reader(f, dialect=dialect, **kwds)
def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
return csv.writer(f, dialect=dialect, **kwds)
else:
class UnicodeReader(BaseIterator):
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
On Python 3, this is replaced (below) by csv.reader, which handles
unicode.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def __next__(self):
row = next(self.reader)
return [compat.text_type(s, "utf-8") for s in row]
class UnicodeWriter(object):
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
self.quoting = kwds.get("quoting", None)
def writerow(self, row):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
row = [x if _check_as_is(x)
else pprint_thing(x).encode("utf-8") for x in row]
self.writer.writerow([s for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and re-encode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
for i, row in enumerate(rows):
rows[i] = [x if _check_as_is(x)
else pprint_thing(x).encode("utf-8") for x in row]
self.writer.writerows([[s for s in row] for row in rows])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and re-encode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
| bsd-3-clause |
jchodera/MSMs | plots/plotting_conformations_ensembler_models.py | 3 | 1404 | # import libraries
import matplotlib
matplotlib.use('Agg')
import mdtraj as md
import matplotlib.pyplot as plt
import numpy as np
from msmbuilder import dataset
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
#Load trajectory with ensembler models
t_models = md.load("../ensembler-models/traj-refine_implicit_md.xtc", top = "../ensembler-models/topol-renumbered-implicit.pdb")
#define 'difference' as the difference between the E310-R409 and K295-E310 contact distances
k295e310 = md.compute_contacts(t_models, [[28,43]])
e310r409 = md.compute_contacts(t_models, [[43,142]])
difference = e310r409[0] - k295e310[0]
#define 'rmsd' as RMSD of activation loop from 2SRC structure
SRC2 = md.load("../reference-structures/SRC_2SRC_A.pdb")
Activation_Loop_SRC2 = [atom.index for atom in SRC2.topology.atoms if (138 <= atom.residue.index <= 158)]
Activation_Loop_Src = [atom.index for atom in t_models.topology.atoms if (138 <= atom.residue.index <= 158)]
#atom_slice returns a new trajectory rather than modifying in place, so keep the sliced copies
SRC2 = SRC2.atom_slice(Activation_Loop_SRC2)
t_models = t_models.atom_slice(Activation_Loop_Src)
difference = difference[:,0]
rmsd = md.rmsd(t_models,SRC2,frame=0)
#plot
#plt.plot(rmsd, difference, 'o', markersize=5, label="ensembler models", color='black')
sns.kdeplot(rmsd,difference,shade=True,log=True)
plt.xlabel('RMSD Activation Loop (nm)')
plt.ylabel('d(E310-R409) - d(K295-E310) (nm)')
plt.ylim(-2,2)
plt.xlim(0.3,1.0)
plt.savefig('plot_conf_src_ensembler_density.png')
| gpl-2.0 |
elkingtonmcb/seldon-server | external/predictor/python/seldon/pipeline/auto_transforms.py | 5 | 6252 | import seldon.pipeline.pipelines as pl
from sklearn import preprocessing
from dateutil.parser import parse
import datetime
class Auto_transform(pl.Feature_transform):
"""Automatically transform a set of features into normalzied numeric or categorical features or dates
Args:
exclude (list):list of features to not include
max_values_numeric_categorical (int):max number of unique values for numeric feature to treat as categorical
custom_date_formats (list(str)): list of custom date formats to try
ignore_vals (list(str)): list of feature values to treat as NA/ignored values
"""
def __init__(self,exclude=[],max_values_numeric_categorical=20,custom_date_formats=None,ignore_vals=None):
super(Auto_transform, self).__init__()
self.exclude = exclude
self.max_values_numeric_categorical = max_values_numeric_categorical
self.scalers = {}
self.custom_date_formats = custom_date_formats
if ignore_vals:
self.ignore_vals = ignore_vals
else:
self.ignore_vals = ["NA",""]
self.transforms = {}
def get_models(self):
return [(self.exclude,self.custom_date_formats,self.max_values_numeric_categorical),self.transforms,self.scalers]
def set_models(self,models):
(self.exclude,self.custom_date_formats,self.max_values_numeric_categorical) = models[0]
self.transforms = models[1]
self.scalers = models[2]
@staticmethod
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
@staticmethod
def isBoolean(v):
v = str(v)
return v.lower() == "true" or v.lower() == "false" or v == "1" or v == "0"
@staticmethod
def toBoolean(f,v):
v = str(v)
if v.lower() == "true" or v == "1":
return 1
else:
return 0
def fit_scalers(self,objs,features):
"""fit numeric scalers on all numeric features
requires enough memory to run sklearn standard scaler on all values for a feature
"""
print "creating ",len(features),"features scalers"
Xs = {}
for f in features:
Xs[f] = []
for j in objs:
for f in features:
if f in j and self.is_number(j[f]):
Xs[f].append(float(j[f]))
c = 1
for f in Xs:
print "creating feature scaler",c," for ",f
self.scalers[f] = preprocessing.StandardScaler(with_mean=True, with_std=True).fit(Xs[f])
c += 1
def scale(self,f,v):
if self.is_number(v):
return self.scalers[f].transform([v])[0]
else:
return 0
@staticmethod
def make_categorical_token(f,v):
"""make a ctaegorical feature from feature and its value
"""
v = str(v).lower().replace(" ","_")
if Auto_transform.is_number(v):
return "t_"+v
else:
return v
def is_date(self,v):
"""is this feature a date
"""
try:
parse(v)
return True
except:
if self.custom_date_formats:
for f in self.custom_date_formats:
try:
datetime.datetime.strptime( v, f )
return True
except:
pass
return False
def unix_time(self,dt):
"""transform a date into a unix day number
"""
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
def to_date(self,f,v):
d = None
try:
d = parse(v)
except:
for f in self.custom_date_formats:
try:
d = datetime.datetime.strptime( v, f )
except:
pass
if d:
return "t_"+str(int(self.unix_time(d)/86400))
else:
return None
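    # Worked example (illustrative): parse("2015-01-31") falls 16,466 days after the
    # Unix epoch (1970-01-01), so to_date would emit the token "t_16466".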
def fit(self,objs):
"""try to guess a transform to apply to each feature
"""
values = {}
c = 1
for j in objs:
for f in j:
if f in self.exclude or j[f] in self.ignore_vals:
pass
else:
cur = values.get(f,set())
if len(cur) < (self.max_values_numeric_categorical + 1):
cur.add(j[f])
values[f] = cur
featuresToScale = []
for f in values:
if all(self.isBoolean(x) for x in values[f]):
self.transforms[f] = self.toBoolean.__name__
else:
if len(values[f]) > self.max_values_numeric_categorical:
if all(self.is_number(x) for x in values[f]):
featuresToScale.append(f)
self.transforms[f] = self.scale.__name__
elif all(self.is_date(x) for x in values[f]):
self.transforms[f] = self.to_date.__name__
else:
self.transforms[f] = self.make_categorical_token.__name__
else:
self.transforms[f] = self.make_categorical_token.__name__
self.fit_scalers(objs,featuresToScale)
def transform(self,j):
"""Apply learnt transforms on each feature
"""
jNew = {}
for f in j:
if not f in self.transforms:
jNew[f] = j[f]
else:
if not j[f] in self.ignore_vals:
vNew = getattr(self,self.transforms[f])(f,j[f])
if vNew:
jNew[f] = vNew
return jNew
if __name__ == '__main__':
objs = [{"a":2.0,"b":"NA","c":1,"d":"29JAN14:21:16:00","e":46},{"a":2.0,"b":"false","c":"trousers","d":"31 jan 2015","e":46},{"a":1.0,"b":0,"c":"big hats","d":"28 aug 2015","e":46}]
t = Auto_transform(max_values_numeric_categorical=1,custom_date_formats = ["%d%b%y:%H:%M:%S"])
t.fit(objs)
objsNew = []
for j in objs:
objsNew.append(t.transform(j))
print objsNew
| apache-2.0 |
narwhaltribe/Hyperloop | src/hyperloop/chart_generator.py | 2 | 5443 | from hyperloop_sim import HyperloopSim
from openmdao.units.units import convert_units as cu
from matplotlib import pyplot, rcParams
import numpy as np
from time import time
import sys
from os import devnull
def plot(p, x_array, x_varname, y_varnames, x_label, y_label,
title='HyperloopSim', postprocess_funcs=tuple(),
show=True, filename='', suppress_errs=True):
'''
Runs an OpenMDAO problem for multiple values of x and plots the specified
results.
Parameters
----------
    p : openmdao.core.problem.Problem
x_array : numpy.array
X values to sample.
x_varname : str
OpenMDAO variable name that can be set using p[`x_varname`].
y_varnames : list of str
OpenMDAO variable names that can be read using p[`y_varnames`[i]].
x_label, y_label : str
Axis labels for graph.
title : str
Title of graph.
postprocess_funcs : tuple of functions
For each y variable, if the corresponding function in the tuple exists
and is not equal to None, the function will be called with the y value
as the only parameter. The returned value will be stored and plotted.
show : bool
Whether or not to open plot in a window.
filename : str
If specified, the location to save a PNG of the plot.
suppress_errs : bool
Attempts to plot data instead of raising an exception if OpenMDAO
encounters an error.
'''
progress_width = 50
y_arrays = list([] for varname in y_varnames)
opt_num = 0
print 'Running optimizations...'
print ''
start_time = time()
for val in x_array:
elapsed_s = time() - start_time
progress = float(opt_num) / len(x_array)
remaining = elapsed_s / progress - elapsed_s if progress > 0 else float('nan')
sys.stdout.write('[%s] %.2f minutes remaining \r' %
('#' * int(progress * progress_width) + '-' * (progress_width - int(progress * progress_width)),
float(remaining) / 60))
sys.stdout.flush()
p[x_varname] = val
sys.stdout = open(devnull, 'w')
try:
p.run()
sys.stdout.close()
sys.stdout = sys.__stdout__
for i in range(len(y_varnames)):
out = p[y_varnames[i]]
if len(postprocess_funcs) > i and postprocess_funcs[i] != None:
out = postprocess_funcs[i](out)
y_arrays[i].append(out)
        except:
            sys.stdout.close()
            sys.stdout = sys.__stdout__
            if not suppress_errs:
                raise
            print 'WARNING: Error encountered running system. Plotting prematurely.'
            break
opt_num += 1
sys.stdout.write('[%s] %.2f minutes elapsed \r' %
('#' * progress_width, float(elapsed_s) / 60))
sys.stdout.flush()
print ''
print ''
colors = ('r', 'g', 'b', 'c', 'm', 'y', 'k')
f = pyplot.figure()
ax = f.add_subplot(1, 1, 1, frame_on=True, xlabel=x_label,
ylabel=y_label, title=title)
ax.tick_params(axis='both', which='major', labelsize=12)
f.set_size_inches(8, 5)
for i in range(len(y_varnames)):
label = y_varnames[i] if (len(postprocess_funcs) <= i or
postprocess_funcs[i] == None) else '* ' + y_varnames[i]
ax.plot(x_array, y_arrays[i], '-', label=label,
lw=2, c=colors[i % len(colors)], alpha=0.6)
ax.legend(loc='best')
if filename != '':
f.savefig(filename, dpi=130)
if show:
f.show()
class PostProcess():
@staticmethod
def converter(unit1, unit2):
def convert(val):
return cu(val, unit1, unit2)
return convert
@staticmethod
def invert(val):
return -val
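# Illustrative note on PostProcess.converter: it returns a one-argument function, e.g.
#   to_kg = PostProcess.converter('lbm/s', 'kg/s')
#   to_kg(1.0)  # -> ~0.4536, since 1 lbm is about 0.4536 kg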
if __name__ == "__main__":
p = HyperloopSim.p_factory(inlet_area=0.4735, cross_section=0.8538)
# Mass flow
plot(p, x_array=np.arange(0.025, 0.6, 0.025),
x_varname='pod_MN',
y_varnames=('tube_flow.W', 'bypass_W', 'compression_system.inlet.Fl_I:stat:W'),
x_label='Travel Mach',
y_label='Mass flow (kg/s)',
title='OpenMDAO: UW Pod, Rev: 10 Nov 2015',
         postprocess_funcs=(None, None, PostProcess.converter('lbm/s', 'kg/s')),
show=True, filename='/Users/brent/Desktop/PyCycle/auto/Mass_Flow.png')
# Compressor CFM
p['percent_into_bypass'] = 1.0 - p['inlet_area'] / p['tube_area']
p['compression_system.comp1.eff_design'] = 0.8
p['compression_system.comp1.PR_design'] = 1.5
plot(p, x_array=np.arange(0.05, 0.5875, 0.0125),
x_varname='pod_MN',
y_varnames=('comp1_cfm',),
x_label='Travel Mach',
y_label='CFM Entering Fan',
title='OpenMDAO: UW Pod, Rev: 10 Nov 2015',
show=True, filename='/Users/brent/Desktop/PyCycle/auto/CFM.png')
# Flow by cross section
p['pod_MN'] = 0.35
p['percent_into_bypass'] = 1.0
p['inlet_area'] = 0.4735
plot(p, x_array=np.arange(0.8538, 1.5, 0.025),
x_varname='cross_section',
y_varnames=('tube_flow.W', 'bypass_W', 'compression_system.inlet.Fl_I:stat:W'),
x_label='Cross Section (m^2) at Mach 0.35',
y_label='Mass flow (kg/s)',
title='OpenMDAO: UW Pod, Rev: 10 Nov 2015',
postprocess_funcs=(None, None, PostProcess.converter('lbm/s', 'kg/s')),
show=True, filename='/Users/brent/Desktop/PyCycle/auto/Cross_Section_Flow.png')
| apache-2.0 |
capitancambio/brainz | brainz/plotting/example.py | 1 | 11872 | """
This demo demonstrates how to draw a dynamic mpl (matplotlib)
plot in a wxPython application.
It allows "live" plotting as well as manual zooming to specific
regions.
Both X and Y axes allow "auto" or "manual" settings. For Y, auto
mode sets the scaling of the graph to see all the data points.
For X, auto mode makes the graph "follow" the data. Set it X min
to manual 0 to always see the whole data from the beginning.
Note: press Enter in the 'manual' text box to make a new value
affect the plot.
Eli Bendersky ([email protected])
License: this code is in the public domain
Last modified: 31.07.2008
"""
import os
import pprint
import random
import sys
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import wxversion
wxversion.select('2.8')
import wx
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import numpy as np
import pylab
class DataGen(object):
""" A silly class that generates pseudo-random data for
display in the plot.
"""
def __init__(self, init=0):
self.data = self.init = init
def next(self):
self._recalc_data()
return self.data
def _recalc_data(self):
delta = random.uniform(-0.5, 0.5)
r = random.random()
if r > 0.9:
self.data += delta * 15
elif r > 0.8:
# attraction to the initial value
delta += (0.5 if self.init > self.data else -0.5)
self.data += delta
else:
self.data += delta
class BoundControlBox(wx.Panel):
""" A static box with a couple of radio buttons and a text
box. Allows to switch between an automatic mode and a
manual mode with an associated value.
"""
def __init__(self, parent, ID, label, initval):
wx.Panel.__init__(self, parent, ID)
self.value = initval
box = wx.StaticBox(self, -1, label)
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
self.radio_auto = wx.RadioButton(self, -1,
label="Auto", style=wx.RB_GROUP)
self.radio_manual = wx.RadioButton(self, -1,
label="Manual")
self.manual_text = wx.TextCtrl(self, -1,
size=(35,-1),
value=str(initval),
style=wx.TE_PROCESS_ENTER)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_manual_text, self.manual_text)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.manual_text)
manual_box = wx.BoxSizer(wx.HORIZONTAL)
manual_box.Add(self.radio_manual, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_auto, 0, wx.ALL, 10)
sizer.Add(manual_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def on_update_manual_text(self, event):
self.manual_text.Enable(self.radio_manual.GetValue())
def on_text_enter(self, event):
self.value = self.manual_text.GetValue()
def is_auto(self):
return self.radio_auto.GetValue()
def manual_value(self):
return self.value
class GraphFrame(wx.Frame):
""" The main frame of the application
"""
title = 'Demo: dynamic matplotlib graph'
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.buffsz=200
self.buffcnt=0
self.chans=64
self.datagen=[]
self.data=[]
for i in range(0,self.chans):
self.datagen.append( DataGen())
self.data.append( [0]*self.buffsz)
self.paused = False
self.axes=[]
self.plot_data=[]
self.create_menu()
self.create_status_bar()
self.create_main_panel()
self.redraw_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.redraw_timer.Start(1000)
def create_menu(self):
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.xmin_control = BoundControlBox(self.panel, -1, "X min", 0)
self.xmax_control = BoundControlBox(self.panel, -1, "X max", 50)
self.ymin_control = BoundControlBox(self.panel, -1, "Y min", 0)
self.ymax_control = BoundControlBox(self.panel, -1, "Y max", 100)
self.pause_button = wx.Button(self.panel, -1, "Pause")
self.Bind(wx.EVT_BUTTON, self.on_pause_button, self.pause_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_pause_button, self.pause_button)
self.cb_grid = wx.CheckBox(self.panel, -1,
"Show Grid",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_grid, self.cb_grid)
self.cb_grid.SetValue(True)
self.cb_xlab = wx.CheckBox(self.panel, -1,
"Show X labels",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_xlab, self.cb_xlab)
self.cb_xlab.SetValue(True)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.pause_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(20)
self.hbox1.Add(self.cb_grid, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.cb_xlab, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.xmin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.xmax_control, border=5, flag=wx.ALL)
self.hbox2.AddSpacer(24)
self.hbox2.Add(self.ymin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.ymax_control, border=5, flag=wx.ALL)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox1, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
self.statusbar = self.CreateStatusBar()
def init_plot(self):
self.dpi =25
self.fig = Figure((40.0, 40.0), dpi=self.dpi)
print self.axes
for i in range(0,self.chans):
            self.axes.append(self.fig.add_subplot(self.chans,1,i+1)) # subplot indices are 1-based
self.axes[i].set_axis_bgcolor('black')
self.axes[i].set_xbound(lower=0, upper=self.buffsz)
self.axes[i].set_ybound(lower=-10, upper=10)
self.axes[i].set_ylim(bottom=-10,top=10)
pylab.setp(self.axes[i].get_xticklabels(),
visible=False)
pylab.setp(self.axes[i].get_yticklabels(),
visible=False)
self.fig.subplots_adjust(hspace=0)
#pylab.setp(self.axes[-1].get_xticklabels(), fontsize=8)
#pylab.setp(self.axes[-1].get_yticklabels(), fontsize=8)
# plot the data as a line series, and save the reference
# to the plotted line series
#
for i in range(0,self.chans):
self.plot_data.append( self.axes[i].plot(
self.data[i],
linewidth=1,
color=(1, 1, 0),
)[0])
def draw_plot(self):
""" Redraws the plot
"""
# when xmin is on auto, it "follows" xmax to produce a
# sliding window effect. therefore, xmin is assigned after
# xmax.
#
#if self.xmax_control.is_auto():
#xmax = len(self.data) if len(self.data) > 50 else 50
#else:
#xmax = int(self.xmax_control.manual_value())
#
#if self.xmin_control.is_auto():
#xmin = xmax - 50
#else:
#xmin = int(self.xmin_control.manual_value())
# for ymin and ymax, find the minimal and maximal values
# in the data set and add a mininal margin.
#
# note that it's easy to change this scheme to the
# minimal/maximal value in the current display, and not
# the whole data set.
#
#if self.ymin_control.is_auto():
#ymin = round(min(self.data), 0) - 1
#else:
#ymin = int(self.ymin_control.manual_value())
#
#if self.ymax_control.is_auto():
#ymax = round(max(self.data), 0) + 1
#else:
#ymax = int(self.ymax_control.manual_value())
#
# anecdote: axes.grid assumes b=True if any other flag is
# given even if b is set to False.
# so just passing the flag into the first statement won't
# work.
#
for i in range(0,self.chans):
if self.cb_grid.IsChecked():
self.axes[i].grid(True, color='gray')
else:
self.axes[i].grid(False)
ymin=min([min(self.data[i]),-10])
ymax=max([max(self.data[i]),10])
self.axes[i].set_ybound(lower=ymin, upper=ymax)
self.axes[i].set_ylim(bottom=ymin,top=ymax)
#
## Using setp here is convenient, because get_xticklabels
## returns a list over which one needs to explicitly
## iterate, and setp already handles this.
##
#pylab.setp(self.axes[i].get_xticklabels(),
#visible=False)
self.plot_data[i].set_xdata(np.arange(len(self.data[i])))
self.plot_data[i].set_ydata(np.array(self.data[i]))
self.canvas.draw()
def on_pause_button(self, event):
self.paused = not self.paused
def on_update_pause_button(self, event):
label = "Resume" if self.paused else "Pause"
self.pause_button.SetLabel(label)
def on_cb_grid(self, event):
self.draw_plot()
def on_cb_xlab(self, event):
self.draw_plot()
def on_save_plot(self, event):
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def on_redraw_timer(self, event):
# if paused do not add data, but still redraw the plot
# (to respond to scale modifications, grid change, etc.)
#
if not self.paused:
if self.buffcnt==self.buffsz:
self.buffcnt=0
for i in range(0,self.chans):
self.data[i]=[0]*self.buffsz
for i in range(0,self.chans):
self.data[i][self.buffcnt]=self.datagen[i].next()
self.data[i].append(None)
self.buffcnt+=1
self.draw_plot()
def on_exit(self, event):
self.Destroy()
def flash_status_message(self, msg, flash_len_ms=1500):
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = GraphFrame()
app.frame.Show()
app.MainLoop()
| mit |
kwentz10/Cellular_Automata | Git_Add_Scripts/TeaLeaf_Dispersion.py | 1 | 4436 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 11 13:22:59 2016
Cellular Automata: Simple Tea Leaf Dispersion (non-oriented, raster)
Using CellLab
@author: Katherine
"""
import time
import matplotlib
from numpy import where
from landlab import RasterModelGrid
from landlab.ca.celllab_cts import Transition, CAPlotter
from landlab.ca.raster_cts import RasterCTS
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent state
transitions for an unbiased random walk.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain, tea leaf, or solute molecule).
The states and transitions are as follows:
Pair state Transition to Process Rate (cells/s)
========== ============= ======= ==============
0 (0-0) (none) - -
1 (0-1) 2 (1-0) left/down motion 10.0
2 (1-0) 1 (0-1) right/up motion 10.0
3 (1-1) (none) - -
"""
# Create an empty transition list
xn_list = []
# Append two transitions to the list.
# Note that the arguments to the Transition() object constructor are:
# - Tuple representing starting pair state
# (left/bottom cell, right/top cell, orientation)
# - Tuple representing new pair state
# (left/bottom cell, right/top cell, orientation)
# - Transition rate (cells per time step, in this case 1 sec)
# - Name for transition
xn_list.append( Transition((0,1,0), (1,0,0), 10., 'left/down motion') )
xn_list.append( Transition((1,0,0), (0,1,0), 10., 'right/up motion') )
return xn_list
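# Reading the transitions above (illustrative): Transition((0,1,0), (1,0,0), 10., ...)
# says that a cell pair whose left/bottom cell is fluid (0) and whose right/top cell
# holds a particle (1) swaps the two states at an average rate of 10 events per
# second, which moves the particle left or down on this non-oriented raster grid.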
def main():
# INITIALIZE
# User-defined parameters
nr = 80 # number of rows in grid
nc = 50 # number of columns in grid
plot_interval = 0.5 # time interval for plotting, sec
run_duration = 20.0 # duration of run, sec
report_interval = 10.0 # report interval, in real-time seconds
# Remember the clock time, and calculate when we next want to report
# progress.
current_real_time = time.time()
next_report = current_real_time + report_interval
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
#Create a node-state dictionary
ns_dict = { 0 : 'fluid', 1 : 'particle' }
#Create the transition list
xn_list = setup_transition_list()
# Create the node-state array and attach it to the grid
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int)
# Initialize the node-state array: here, the initial condition is a pile of
# resting grains at the bottom of a container.
bottom_rows = where(mg.node_y<0.1*nr)[0]
node_state_grid[bottom_rows] = 1
# For visual display purposes, set all boundary nodes to fluid
node_state_grid[mg.closed_boundary_nodes] = 0
# Create the CA model
ca = RasterCTS(mg, ns_dict, xn_list, node_state_grid)
# Set up colors for plotting
grain = '#5F594D'
fluid = '#D0E4F2'
clist = [fluid,grain]
my_cmap = matplotlib.colors.ListedColormap(clist)
# Create a CAPlotter object for handling screen display
ca_plotter = CAPlotter(ca, cmap=my_cmap)
# Plot the initial grid
ca_plotter.update_plot()
# RUN
current_time = 0.0
while current_time < run_duration:
# Once in a while, print out simulation real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= next_report:
            print('Current simulation time ' + str(current_time) + ' (' +
                  str(int(100*current_time/run_duration)) + '%)')
next_report = current_real_time + report_interval
# Run the model forward in time until the next output step
ca.run(current_time+plot_interval, ca.node_state, plot_each_transition=False)
current_time += plot_interval
# Plot the current grid
ca_plotter.update_plot()
ca_plotter.finalize()
main() | mit |
AdrienGuille/TOM | tom_lib/nlp/topic_model.py | 1 | 13558 | # coding: utf-8
import itertools
from abc import ABCMeta, abstractmethod
import numpy as np
import tom_lib.stats
from scipy import spatial, cluster
from scipy.sparse import coo_matrix
from sklearn.decomposition import NMF, LatentDirichletAllocation as LDA
#import lda
from tom_lib.structure.corpus import Corpus
__author__ = "Adrien Guille, Pavel Soriano"
__email__ = "[email protected]"
class TopicModel(object):
__metaclass__ = ABCMeta
def __init__(self, corpus):
self.corpus = corpus # a Corpus object
self.document_topic_matrix = None # document x topic matrix
self.topic_word_matrix = None # topic x word matrix
self.nb_topics = None # a scalar value > 1
@abstractmethod
def infer_topics(self, num_topics=10, **kwargs):
pass
def greene_metric(self, min_num_topics=10, step=5, max_num_topics=50, top_n_words=10, tao=10):
"""
Implements Greene metric to compute the optimal number of topics. Taken from How Many Topics?
Stability Analysis for Topic Models from Greene et al. 2014.
        :param step: Step between successive numbers of topics to test
:param min_num_topics: Minimum number of topics to test
:param max_num_topics: Maximum number of topics to test
:param top_n_words: Top n words for topic to use
:param tao: Number of sampled models to build
:return: A list of len (max_num_topics - min_num_topics) with the stability of each tested k
"""
stability = []
# Build reference topic model
# Generate tao topic models with tao samples of the corpus
for k in range(min_num_topics, max_num_topics + 1, step):
self.infer_topics(k)
reference_rank = [list(zip(*self.top_words(i, top_n_words)))[0] for i in range(k)]
agreement_score_list = []
for t in range(tao):
tao_corpus = Corpus(source_file_path=self.corpus._source_file_path,
language=self.corpus._language,
n_gram=self.corpus._n_gram,
vectorization=self.corpus._vectorization,
max_relative_frequency=self.corpus._max_relative_frequency,
min_absolute_frequency=self.corpus._min_absolute_frequency,
sample=True)
tao_model = type(self)(tao_corpus)
tao_model.infer_topics(k)
tao_rank = [next(zip(*tao_model.top_words(i, top_n_words))) for i in range(k)]
agreement_score_list.append(tom_lib.stats.agreement_score(reference_rank, tao_rank))
stability.append(np.mean(agreement_score_list))
return stability
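    # Illustrative use of the metric above: evaluate a range of k and keep the one
    # whose mean stability is highest, e.g.
    #   ks = range(10, 51, 5)
    #   stability = model.greene_metric(min_num_topics=10, step=5, max_num_topics=50)
    #   best_k = ks[int(np.argmax(stability))]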
def arun_metric(self, min_num_topics=10, max_num_topics=50, iterations=10):
"""
Implements Arun metric to estimate the optimal number of topics:
Arun, R., V. Suresh, C. V. Madhavan, and M. N. Murthy
On finding the natural number of topics with latent dirichlet allocation: Some observations.
In PAKDD (2010), pp. 391–402.
:param min_num_topics: Minimum number of topics to test
:param max_num_topics: Maximum number of topics to test
:param iterations: Number of iterations per value of k
:return: A list of len (max_num_topics - min_num_topics) with the average symmetric KL divergence for each k
"""
kl_matrix = []
for j in range(iterations):
kl_list = []
l = np.array([sum(self.corpus.vector_for_document(doc_id)) for doc_id in range(self.corpus.size)]) # document length
norm = np.linalg.norm(l)
for i in range(min_num_topics, max_num_topics + 1):
self.infer_topics(i)
c_m1 = np.linalg.svd(self.topic_word_matrix.todense(), compute_uv=False)
c_m2 = l.dot(self.document_topic_matrix.todense())
c_m2 += 0.0001 # we need this to prevent components equal to zero
c_m2 /= norm
kl_list.append(tom_lib.stats.symmetric_kl(c_m1.tolist(), c_m2.tolist()[0]))
kl_matrix.append(kl_list)
        output = np.array(kl_matrix)
        return output.mean(axis=0)
def brunet_metric(self, min_num_topics=10, max_num_topics=50, iterations=10):
"""
Implements a consensus-based metric to estimate the optimal number of topics:
Brunet, J.P., Tamayo, P., Golub, T.R., Mesirov, J.P.
Metagenes and molecular pattern discovery using matrix factorization.
Proc. National Academy of Sciences 101(12) (2004), pp. 4164–4169
        :param min_num_topics: Minimum number of topics to test
        :param max_num_topics: Maximum number of topics to test
        :param iterations: Number of model runs per value of k
        :return: A list of cophenetic correlation coefficients, one per tested number of topics
"""
cophenetic_correlation = []
for i in range(min_num_topics, max_num_topics+1):
average_C = np.zeros((self.corpus.size, self.corpus.size))
for j in range(iterations):
self.infer_topics(i)
for p in range(self.corpus.size):
for q in range(self.corpus.size):
if self.most_likely_topic_for_document(p) == self.most_likely_topic_for_document(q):
average_C[p, q] += float(1./iterations)
clustering = cluster.hierarchy.linkage(average_C, method='average')
Z = cluster.hierarchy.dendrogram(clustering, orientation='right')
index = Z['leaves']
average_C = average_C[index, :]
average_C = average_C[:, index]
(c, d) = cluster.hierarchy.cophenet(Z=clustering, Y=spatial.distance.pdist(average_C))
# plt.clf()
# f, ax = plt.subplots(figsize=(11, 9))
# ax = sns.heatmap(average_C)
# plt.savefig('reorderedC.png')
cophenetic_correlation.append(c)
return cophenetic_correlation
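    # Illustrative interpretation of the two estimators above: arun_metric returns an
    # average symmetric KL divergence per candidate k (lower is better, so look for the
    # minimum or the elbow of the curve), while brunet_metric returns a cophenetic
    # correlation per k (higher means more stable document clustering across runs).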
def print_topics(self, num_words=10, sort_by_freq=''):
frequency = self.topics_frequency()
topic_list = []
for topic_id in range(self.nb_topics):
word_list = []
for weighted_word in self.top_words(topic_id, num_words):
word_list.append(weighted_word[0])
topic_list.append((topic_id, frequency[topic_id], word_list))
if sort_by_freq == 'asc':
topic_list.sort(key=lambda x: x[1], reverse=False)
elif sort_by_freq == 'desc':
topic_list.sort(key=lambda x: x[1], reverse=True)
for topic_id, frequency, topic_desc in topic_list:
print('topic %d\t%f\t%s' % (topic_id, frequency, ' '.join(topic_desc)))
def top_words(self, topic_id, num_words):
vector = self.topic_word_matrix[topic_id]
cx = vector.tocoo()
weighted_words = [()] * len(self.corpus.vocabulary)
for row, word_id, weight in itertools.zip_longest(cx.row, cx.col, cx.data):
weighted_words[word_id] = (self.corpus.word_for_id(word_id), weight)
weighted_words.sort(key=lambda x: x[1], reverse=True)
return weighted_words[:num_words]
def top_documents(self, topic_id, num_docs):
vector = self.document_topic_matrix[:, topic_id]
cx = vector.tocoo()
weighted_docs = [()] * self.corpus.size
for doc_id, topic_id, weight in itertools.zip_longest(cx.row, cx.col, cx.data):
weighted_docs[doc_id] = (doc_id, weight)
weighted_docs.sort(key=lambda x: x[1], reverse=True)
return weighted_docs[:num_docs]
def word_distribution_for_topic(self, topic_id):
vector = self.topic_word_matrix[topic_id].toarray()
return vector[0]
def topic_distribution_for_document(self, doc_id):
vector = self.document_topic_matrix[doc_id].toarray()
return vector[0]
def topic_distribution_for_word(self, word_id):
vector = self.topic_word_matrix[:, word_id].toarray()
return vector.T[0]
def topic_distribution_for_author(self, author_name):
all_weights = []
for document_id in self.corpus.documents_by_author(author_name):
all_weights.append(self.topic_distribution_for_document(document_id))
output = np.array(all_weights)
return output.mean(axis=0)
def most_likely_topic_for_document(self, doc_id):
weights = list(self.topic_distribution_for_document(doc_id))
return weights.index(max(weights))
def topic_frequency(self, topic, date=None):
return self.topics_frequency(date=date)[topic]
def topics_frequency(self, date=None):
frequency = np.zeros(self.nb_topics)
if date is None:
ids = range(self.corpus.size)
else:
ids = self.corpus.doc_ids(date)
for i in ids:
topic = self.most_likely_topic_for_document(i)
frequency[topic] += 1.0 / len(ids)
return frequency
def documents_for_topic(self, topic_id):
doc_ids = []
for doc_id in range(self.corpus.size):
most_likely_topic = self.most_likely_topic_for_document(doc_id)
if most_likely_topic == topic_id:
doc_ids.append(doc_id)
return doc_ids
def documents_per_topic(self):
topic_associations = {}
for i in range(self.corpus.size):
topic_id = self.most_likely_topic_for_document(i)
if topic_associations.get(topic_id):
documents = topic_associations[topic_id]
documents.append(i)
topic_associations[topic_id] = documents
else:
documents = [i]
topic_associations[topic_id] = documents
return topic_associations
def affiliation_repartition(self, topic_id):
counts = {}
doc_ids = self.documents_for_topic(topic_id)
for i in doc_ids:
affiliations = set(self.corpus.affiliation(i))
for affiliation in affiliations:
if counts.get(affiliation) is not None:
count = counts[affiliation] + 1
counts[affiliation] = count
else:
counts[affiliation] = 1
tuples = []
for affiliation, count in counts.items():
tuples.append((affiliation, count))
tuples.sort(key=lambda x: x[1], reverse=True)
return tuples
class LatentDirichletAllocation(TopicModel):
def infer_topics(self, num_topics=10, algorithm='variational', **kwargs):
self.nb_topics = num_topics
lda_model = None
topic_document = None
if algorithm == 'variational':
lda_model = LDA(n_components=num_topics, learning_method='batch')
topic_document = lda_model.fit_transform(self.corpus.sklearn_vector_space)
elif algorithm == 'gibbs':
lda_model = lda.LDA(n_topics=num_topics, n_iter=500)
topic_document = lda_model.fit_transform(self.corpus.sklearn_vector_space)
else:
raise ValueError("algorithm must be either 'variational' or 'gibbs', got '%s'" % algorithm)
self.topic_word_matrix = []
self.document_topic_matrix = []
vocabulary_size = len(self.corpus.vocabulary)
row = []
col = []
data = []
for topic_idx, topic in enumerate(lda_model.components_):
for i in range(vocabulary_size):
row.append(topic_idx)
col.append(i)
data.append(topic[i])
self.topic_word_matrix = coo_matrix((data, (row, col)),
shape=(self.nb_topics, len(self.corpus.vocabulary))).tocsr()
row = []
col = []
data = []
doc_count = 0
for doc in topic_document:
topic_count = 0
for topic_weight in doc:
row.append(doc_count)
col.append(topic_count)
data.append(topic_weight)
topic_count += 1
doc_count += 1
self.document_topic_matrix = coo_matrix((data, (row, col)),
shape=(self.corpus.size, self.nb_topics)).tocsr()
class NonNegativeMatrixFactorization(TopicModel):
def infer_topics(self, num_topics=10, **kwargs):
self.nb_topics = num_topics
nmf = NMF(n_components=num_topics)
topic_document = nmf.fit_transform(self.corpus.sklearn_vector_space)
self.topic_word_matrix = []
self.document_topic_matrix = []
vocabulary_size = len(self.corpus.vocabulary)
row = []
col = []
data = []
for topic_idx, topic in enumerate(nmf.components_):
for i in range(vocabulary_size):
row.append(topic_idx)
col.append(i)
data.append(topic[i])
self.topic_word_matrix = coo_matrix((data, (row, col)),
shape=(self.nb_topics, len(self.corpus.vocabulary))).tocsr()
row = []
col = []
data = []
doc_count = 0
for doc in topic_document:
topic_count = 0
for topic_weight in doc:
row.append(doc_count)
col.append(topic_count)
data.append(topic_weight)
topic_count += 1
doc_count += 1
self.document_topic_matrix = coo_matrix((data, (row, col)),
shape=(self.corpus.size, self.nb_topics)).tocsr()
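# Minimal usage sketch for the two estimators above (illustrative only; the TopicModel
# constructor is not shown in this file, so the corpus argument below is an assumption):
#
#   model = NonNegativeMatrixFactorization(corpus)
#   model.infer_topics(num_topics=15)
#   model.print_topics(num_words=10, sort_by_freq='desc')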
| mit |
plowman/python-mcparseface | models/syntaxnet/tensorflow/tensorflow/examples/skflow/iris_val_based_early_stopping.py | 3 | 2275 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, metrics
from sklearn.cross_validation import train_test_split
from tensorflow.contrib import learn
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
test_size=0.2,
random_state=42)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
test_size=0.2, random_state=42)
val_monitor = learn.monitors.ValidationMonitor(X_val, y_val,
early_stopping_rounds=200,
n_classes=3)
# classifier with early stopping on training data
classifier1 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=2000)
classifier1.fit(X_train, y_train, logdir='/tmp/iris_model/')
score1 = metrics.accuracy_score(y_test, classifier1.predict(X_test))
# classifier with early stopping on validation data
classifier2 = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=2000)
classifier2.fit(X_train, y_train, val_monitor, logdir='/tmp/iris_model_val/')
score2 = metrics.accuracy_score(y_test, classifier2.predict(X_test))
# in many applications, the score is improved by using early stopping on val data
print(score2 > score1)
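# (Illustrative addition, not part of the original example) the two accuracies can
# also be reported explicitly:
# print('accuracy, early stopping on training data:   %f' % score1)
# print('accuracy, early stopping on validation data: %f' % score2)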
| apache-2.0 |
rgommers/statsmodels | statsmodels/examples/tut_ols_ancova.py | 33 | 2455 | '''Examples OLS
Note: uncomment plt.show() to display graphs
Summary:
========
Relevant part of construction of design matrix
xg includes group numbers/labels,
x1 is continuous explanatory variable
>>> dummy = (xg[:,None] == np.unique(xg)).astype(float)
>>> X = np.c_[x1, dummy[:,1:], np.ones(nsample)]
Estimate the model
>>> res2 = sm.OLS(y, X).fit()
>>> print res2.params
[ 1.00901524 3.08466166 -2.84716135 9.94655423]
>>> print res2.bse
[ 0.07499873 0.71217506 1.16037215 0.38826843]
>>> prstd, iv_l, iv_u = wls_prediction_std(res2)
"Test hypothesis that all groups have same intercept"
>>> R = [[0, 1, 0, 0],
... [0, 0, 1, 0]]
>>> print res2.f_test(R)
<F test: F=array([[ 91.69986847]]), p=[[ 8.90826383e-17]], df_denom=46, df_num=2>
strongly rejected because differences in intercept are very large
'''
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.sandbox.regression.predstd import wls_prediction_std
#fix a seed for these examples
np.random.seed(98765789)
#OLS with dummy variables, similar to ANCOVA
#-------------------------------------------
#construct simulated example:
#3 groups common slope but different intercepts
nsample = 50
x1 = np.linspace(0, 20, nsample)
sig = 1.
#suppose observations from 3 groups
xg = np.zeros(nsample, int)
xg[20:40] = 1
xg[40:] = 2
#print xg
dummy = (xg[:,None] == np.unique(xg)).astype(float)
#use group 0 as benchmark
X = np.c_[x1, dummy[:,1:], np.ones(nsample)]
beta = [1., 3, -3, 10]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
#estimate
#~~~~~~~~
res2 = sm.OLS(y, X).fit()
#print "estimated parameters: x d1-d0 d2-d0 constant"
print(res2.params)
#print "standard deviation of parameter estimates"
print(res2.bse)
prstd, iv_l, iv_u = wls_prediction_std(res2)
#print res.summary()
#plot
#~~~~
plt.figure()
plt.plot(x1, y, 'o', x1, y_true, 'b-')
plt.plot(x1, res2.fittedvalues, 'r--.')
plt.plot(x1, iv_u, 'r--')
plt.plot(x1, iv_l, 'r--')
plt.title('3 groups: different intercepts, common slope; blue: true, red: OLS')
plt.show()
#Test hypothesis that all groups have same intercept
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
R = [[0, 1, 0, 0],
[0, 0, 1, 0]]
# F test joint hypothesis R * beta = 0
# i.e. coefficient on both dummy variables equal zero
print("Test hypothesis that all groups have same intercept")
print(res2.f_test(R))
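#The same F statistic can be recovered by hand from a restricted model that drops the
#dummy columns (illustrative sketch, not part of the original example):
#X_r = np.c_[x1, np.ones(nsample)]
#res_r = sm.OLS(y, X_r).fit()
#df_num, df_denom = 2, res2.df_resid
#F = ((res_r.ssr - res2.ssr) / df_num) / (res2.ssr / df_denom)
#print(F)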
| bsd-3-clause |
seakers/daphne_brain | AT/KNN.py | 1 | 3703 | import hashlib
from django.conf import settings
from channels.generic.websocket import JsonWebsocketConsumer
import pandas as pd
import numpy as np
class adaptiveKNN(JsonWebsocketConsumer):
##### WebSocket event handlers
def connect(self):
"""
Called when the websocket is handshaking as part of initial connection.
"""
# Accept the connection
self.accept()
def receive_json(self, content, **kwargs):
# Reads the data if available in content
if 'data' in content:
data = pd.read_json(content['data'], orient='records').set_index('timestamp')
else:
# Read Sample CSV
data = pd.read_csv('AT/Data/sample_3.csv', parse_dates=True, index_col='timestamp')
if 'k' in content: # Number of nearest Neighbors
k = content['k']
else:
k = 10
if 'c' in content: # Overall smoothing parameter
c = content['c']
else:
c = 0.7
        if 'eps' in content: # Epsilon to avoid division by zero
eps = content['eps']
else:
eps = 10e-5
if 'sp' in content: # Random sampling fraction parameters
sp = content['sp']
else:
sp = 1
# Transfer data to matrix
x = np.asmatrix(data.values)
        # These parameters must be set
        n = len(x)
        m = int(n*sp) # Randomly samples a fraction sp of the data as the training set
# Set the training data
x_train = x[np.random.choice(x.shape[0], m, replace=False)]
x_train_mean = x_train.mean(axis=0)
x_train_std = x_train.std(axis=0)
# Normalizes the data
x_train = (x_train - x_train_mean) / x_train_std
x = (x - x_train_mean) / x_train_std
# TODO must also Consider if there is a provided training set
matD = np.ones((n, m)) * np.inf
for i in range(n):
for j in range(m):
d = np.linalg.norm(x[i] - x_train[j])
if d > 0: # We avoid having distances = 0 (Which correspond with the sampled points)
matD[i, j] = d
# Finds the nearest neighbors
near_neigh = np.argsort(matD, axis=1)
# Selects only the k nearest neighbors
# k_near = near_neigh[:, 1:k] # The number one is to avoid matching them with themselves
k_near = near_neigh[:, :k]
# Create distance matrix
dist_k = np.zeros((n, k))
for i in range(n):
dist_k[i] = matD[i, k_near[i]]
# dist_k is an ordered vector
dk_mean = np.mean(dist_k, axis=1)
dk_max = np.max(dk_mean)
dk_min = np.min(dk_mean)
ri = c * (eps + dk_min + dk_max - dk_mean) # Computes the kernel radius
# Note that we are only using the k nearest neighbors to compute the kernel radius
d_all = np.sort(matD, axis=1)[:, :-1]
        # We avoid taking the last distance as it is not 'fair' for the sampled elements
aux = np.zeros((n, m - 1))
for i in range(n):
aux[i] = d_all[i] / ri[i]
ro = np.mean(np.exp(-(aux ** 2)), axis=1) # Computes the density
ro = np.max([ro, np.ones(n)*eps], axis=0) # to avoid density = 0
        localOutlierScore = np.log(np.divide(np.mean(ro[k_near]), ro)) # Local outlier score:
        # log-ratio of the mean neighborhood density to each point's own density; it could be
        # normalized to make it comparable with other outlier scores
out = pd.DataFrame()
out['anomalyScore'] = localOutlierScore
out['timestamp'] = data.index
self.send_json(
out.to_json(date_format='iso', orient='records'))
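# Hypothetical client payload sketch (field names mirror the checks in receive_json above;
# this is not part of the original consumer):
#
#   {"data": "<records-oriented JSON with a 'timestamp' column>",
#    "k": 10, "c": 0.7, "eps": 1e-4, "sp": 0.5}
#
# The reply is a records-oriented JSON list of {"anomalyScore": ..., "timestamp": ...} entries.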
| mit |
ycaihua/scikit-learn | sklearn/linear_model/logistic.py | 6 | 55848 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import _check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
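# For reference (a sketch, not part of the module): with z_i = y_i * (x_i . w + c) and
# sigma the logistic function, the two helpers above compute
#
#   loss(w) = - sum_i s_i * log(sigma(z_i)) + 0.5 * alpha * ||w||^2
#
# and its gradient X^T (s * (sigma(z) - 1) * y) + alpha * w. The pair can be checked
# numerically, e.g.
#
#   from scipy.optimize import check_grad
#   err = check_grad(lambda w: _logistic_loss(w, X, y, 1.0),
#                    lambda w: _logistic_loss_and_grad(w, X, y, 1.0)[1],
#                    np.zeros(X.shape[1]))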
def _logistic_loss_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss, gradient and the Hessian.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return out, grad, Hs
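# For reference (sketch): ignoring the intercept row/column, the Hessian assembled by
# _logistic_loss_grad_hess above is
#
#   H = X^T diag(s_i * z_i * (1 - z_i)) X + alpha * I,
#
# and the returned callable Hs(s) computes the product H . s without ever forming H.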
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
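# For reference (sketch): with P_ik the softmax probability of class k for sample i
# computed in _multinomial_loss, the penalized loss above is
#
#   L(W) = - sum_i s_i * sum_k Y_ik * log(P_ik) + 0.5 * alpha * ||W||_F^2
#
# and its gradient with respect to W is (s * (P - Y))^T X + alpha * W, which is exactly
# what _multinomial_loss_grad assembles before ravelling.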
def _multinomial_loss_grad_hess(w, X, Y, alpha, sample_weight):
"""
Provides multinomial loss, gradient, and a function for computing hessian
vector product.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return loss, grad, hessp
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr'):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class can be either 'multinomial' or 'ovr'"
"got %s" % multi_class)
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers. got %s" % solver)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s cannot solve problems with "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("newton-cg and lbfgs solvers support only "
"l2 penalties, got %s penalty." % penalty)
if dual:
raise ValueError("newton-cg and lbfgs solvers support only "
"dual=False, got dual=%s" % dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class_weights are assigned after masking the labels with a OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
# To take care of object dtypes, i.e 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
            if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size)
)
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1
)
)
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_loss_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_loss_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter
)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol
)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
            coef_, intercept_, _ = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol,
)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
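# Minimal usage sketch for the path function above (illustrative only; X, y are any
# binary-classification arrays accepted by the function):
#
#   coefs, Cs = logistic_regression_path(X, y, Cs=5, fit_intercept=True,
#                                        solver='lbfgs')
#   # len(coefs) == len(Cs); each entry is the coefficient vector obtained for the
#   # corresponding C, with the intercept appended as the last component.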
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs is used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
        True will be useful in cases when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
See also
--------
sklearn.linear_model.SGDClassifier
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
if self.solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError(
"Logistic Regression supports only liblinear, newton-cg and "
"lbfgs solvers, Got solver=%s" % self.solver
)
if self.solver == 'liblinear' and self.multi_class == 'multinomial':
raise ValueError("Solver %s does not support a multinomial "
"backend." % self.solver)
if self.multi_class not in ['ovr', 'multinomial']:
raise ValueError("multi_class should be either ovr or multinomial "
"got %s" % self.multi_class)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol
)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
        # For the binary case, this gets squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
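# Illustrative usage of LogisticRegression (a sketch, not part of the module):
#
#   from sklearn.datasets import load_iris
#   iris = load_iris()
#   clf = LogisticRegression(C=1.0, penalty='l2').fit(iris.data, iris.target)
#   proba = clf.predict_proba(iris.data[:5])   # shape (5, 3); each row sums to 1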
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e. the coefficients obtained after
    convergence in the previous fit are used as the initial guess for the
    present fit, which in general makes it faster.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
        coefs and the C that correspond to the best score are taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape (n_folds, len(Cs_), n_features) or
(n_folds, len(Cs_), n_features + 1)
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape (n_folds, len(Cs_), n_features) or
(n_folds, len(Cs_), n_features + 1) depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if self.solver != 'liblinear':
if self.penalty != 'l2':
raise ValueError("newton-cg and lbfgs solvers support only "
"l2 penalties.")
if self.dual:
raise ValueError("newton-cg and lbfgs solvers support only "
"the primal form.")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False)
if self.multi_class not in ['ovr', 'multinomial']:
raise ValueError("multi_class backend should be either "
"'ovr' or 'multinomial'"
" got %s" % self.multi_class)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning
)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = _check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight == 'auto'):
raise ValueError("class_weight provided should be a "
"dict or 'auto'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([
coefs_paths[i][best_indices[i]]
for i in range(len(folds))
], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
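# Illustrative usage of LogisticRegressionCV (a sketch, not part of the module; X and y
# are any classification arrays accepted by fit):
#
#   clf = LogisticRegressionCV(Cs=10, cv=5).fit(X, y)
#   clf.C_        # best C found for each class
#   clf.scores_   # dict mapping each class label to an (n_folds, len(Cs_)) score grid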
| bsd-3-clause |
thientu/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
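Examples
--------
A minimal, illustrative usage sketch on toy data (the prediction shown
assumes this exact two-class input):
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import LinearSVC
>>> clf = LinearSVC(random_state=0).fit(X, y)
>>> print(clf.predict([[-0.8, -1]]))
[1]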
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
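        # liblinear's Crammer-Singer solver returns one weight vector per
        # class even for binary problems; collapse the two vectors into a
        # single decision vector so the fitted attributes match the usual
        # two-class layout.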
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the standard
SVR loss, while 'squared_epsilon_insensitive' is the squared
epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
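Examples
--------
An illustrative sketch on a tiny one-feature regression problem
(default hyperparameters; only the coefficient shape is asserted):
>>> import numpy as np
>>> from sklearn.svm import LinearSVR
>>> X = np.array([[0.0], [1.0], [2.0], [3.0]])
>>> y = np.array([0.0, 1.0, 2.0, 3.0])
>>> reg = LinearSVR(random_state=0).fit(X, y)
>>> reg.coef_.shape
(1,)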
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to
'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison elements.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to
'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
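Examples
--------
An illustrative sketch (toy data; the exact inlier/outlier labels
depend on nu and the kernel, so no output is asserted):
>>> import numpy as np
>>> from sklearn.svm import OneClassSVM
>>> X = np.array([[0.0, 0.0], [0.1, -0.1], [-0.1, 0.1], [10.0, 10.0]])
>>> clf = OneClassSVM(nu=0.25, gamma=0.5).fit(X)
>>> labels = clf.predict(X)  # +1 for inliers, -1 for outliers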
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
chenchen2015/CheckeeInfo-Scraper | visa_scraper.py | 1 | 3355 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 4 18:50:16 2017
@author: Chen Chen
@version: 1.2
@license: MIT License
"""
import urllib.request
import time
from datetime import datetime
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
visaURL = 'https://www.checkee.info/main.php?dispdate={0:04d}-{1:02d}'
# Get current time
timestamp = time.time()
yearNow = datetime.fromtimestamp(timestamp).year
monthNow = datetime.fromtimestamp(timestamp).month
currentTime = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S')
# File handle
time_string = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d')
filename = f'VISA-Data-{time_string}.csv'
# Initialize dataframe
df = pd.DataFrame(
columns=[
'UserName',
'VisaType',
'VisaEntry',
'City',
'Major',
'VisaStatus',
'CheckDate',
'CompleteDate',
'WaitDays'
]
)
# set column datatypes
df['UserName'] = pd.Series([], dtype=np.str)
df['VisaType'] = pd.Categorical([])
df['VisaEntry'] = pd.Categorical([])
df['Major'] = pd.Categorical([])
df['VisaStatus'] = pd.Categorical([])
df['City'] = pd.Categorical([])
df['CheckDate'] = pd.Series([], dtype='datetime64[ns]')
df['CompleteDate'] = pd.Series([], dtype='datetime64[ns]')
df['WaitDays'] = pd.Series([], dtype=np.int8)
# Main Loop
caseID = 0  # running case index used when appending records below
for yr in range(2009,yearNow+1):
for mo in range(1,13):
if yr == yearNow and mo > monthNow:
break
# Scrape a new page
visaurl = visaURL.format(yr,mo)
req = urllib.request.Request(visaurl, headers={'User-Agent': 'Mozilla/5.0'})
html = urllib.request.urlopen(req).read()
visapage = BeautifulSoup(html, 'html5lib')
# Only get the completed cases - those marked in green
tabEntry = visapage.find_all('tr', attrs={'bgcolor':'#4CBB17'})
print("Scraping Entry: {0}-{1:02d}, {2} records".format(yr,mo,len(tabEntry)))
for idx in range(len(tabEntry)):
userName = tabEntry[idx]('td')[1].text.replace(',','')
visaType = tabEntry[idx]('td')[2].text.replace(',','')
visaEntry = tabEntry[idx]('td')[3].text.replace(',','')
city = tabEntry[idx]('td')[4].text.replace(',','')
major = tabEntry[idx]('td')[5].text.replace(',','')
status = tabEntry[idx]('td')[-5].text.replace(',','')
checkDate = tabEntry[idx]('td')[-4].text.replace(',','')
completeDate = tabEntry[idx]('td')[-3].text.replace(',','')
waitDays = tabEntry[idx]('td')[-2].text.replace(',','')
#print("caseID: {0:05d}, Visa Type: {1}".format(caseID, visaType))
df = df.append(
{
'UserID' : caseID,
'UserName' : userName,
'VisaType' : visaType,
'VisaEntry' : visaEntry,
'City' : city,
'Major' : major,
'VisaStatus' : status,
'CheckDate' : checkDate,
'CompleteDate' : completeDate,
'WaitDays' : waitDays
},
ignore_index=True
)
caseID += 1
df.to_csv(filename)
print('All Done!') | mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/doc/mpl_examples/pylab_examples/tricontour_vs_griddata.py | 3 | 1234 | """
Comparison of griddata and tricontour for an unstructured triangular grid.
"""
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import numpy as np
from numpy.random import uniform, seed
from matplotlib.mlab import griddata
import time
seed(0)
npts = 200
ngridx = 100
ngridy = 200
x = uniform(-2,2,npts)
y = uniform(-2,2,npts)
z = x*np.exp(-x**2-y**2)
# griddata and contour.
start = time.clock()
plt.subplot(211)
xi = np.linspace(-2.1,2.1,ngridx)
yi = np.linspace(-2.1,2.1,ngridy)
zi = griddata(x,y,z,xi,yi,interp='linear')
plt.contour(xi,yi,zi,15,linewidths=0.5,colors='k')
plt.contourf(xi,yi,zi,15,cmap=plt.cm.jet)
plt.colorbar() # draw colorbar
plt.plot(x, y, 'ko', ms=3)
plt.xlim(-2,2)
plt.ylim(-2,2)
plt.title('griddata and contour (%d points, %d grid points)' % (npts, ngridx*ngridy))
print 'griddata and contour seconds:', time.clock() - start
# tricontour.
start = time.clock()
plt.subplot(212)
triang = tri.Triangulation(x, y)
plt.tricontour(x, y, z, 15, linewidths=0.5, colors='k')
plt.tricontourf(x, y, z, 15, cmap=plt.cm.jet)
plt.colorbar()
plt.plot(x, y, 'ko', ms=3)
plt.xlim(-2,2)
plt.ylim(-2,2)
plt.title('tricontour (%d points)' % npts)
print 'tricontour seconds:', time.clock() - start
plt.show()
| gpl-2.0 |
Akshay0724/scikit-learn | sklearn/utils/sparsetools/tests/test_traversal.py | 38 | 2018 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
try:
from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
csgraph_to_dense, csgraph_from_dense
except ImportError:
# Oldish versions of scipy don't have that
csgraph_from_dense = None
def test_graph_breadth_first():
if csgraph_from_dense is None:
raise SkipTest("Old version of scipy, doesn't have csgraph.")
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
bfirst = np.array([[0, 1, 2, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 7, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
for directed in [True, False]:
bfirst_test = breadth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(bfirst_test),
bfirst)
def test_graph_depth_first():
if csgraph_from_dense is None:
raise SkipTest("Old version of scipy, doesn't have csgraph.")
csgraph = np.array([[0, 1, 2, 0, 0],
[1, 0, 0, 0, 3],
[2, 0, 0, 7, 0],
[0, 0, 7, 0, 1],
[0, 3, 0, 1, 0]])
csgraph = csgraph_from_dense(csgraph, null_value=0)
dfirst = np.array([[0, 1, 0, 0, 0],
[0, 0, 0, 0, 3],
[0, 0, 0, 0, 0],
[0, 0, 7, 0, 0],
[0, 0, 0, 1, 0]])
for directed in [True, False]:
dfirst_test = depth_first_tree(csgraph, 0, directed)
assert_array_almost_equal(csgraph_to_dense(dfirst_test),
dfirst)
| bsd-3-clause |
ric2b/Vivaldi-browser | chromium/tools/perf/cli_tools/pinpoint_cli/histograms_df_test.py | 5 | 4115 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from cli_tools.pinpoint_cli import histograms_df
from core.external_modules import pandas
from tracing.value import histogram
from tracing.value import histogram_set
from tracing.value.diagnostics import date_range
from tracing.value.diagnostics import generic_set
def TestHistogram(name, units, values, **kwargs):
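  # Helper for the tests below: builds a Histogram with the given samples and
  # attaches diagnostics, mapping int kwargs to DateRange (timestamps) and
  # list kwargs to GenericSet.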
def DiagnosticValue(value):
if isinstance(value, (int, long)):
return date_range.DateRange(value)
elif isinstance(value, list):
return generic_set.GenericSet(value)
else:
raise NotImplementedError(type(value))
hist = histogram.Histogram(name, units)
hist.diagnostics.update(
(key, DiagnosticValue(value)) for key, value in kwargs.iteritems())
for value in values:
hist.AddSample(value)
return hist
@unittest.skipIf(pandas is None, 'pandas not available')
class TestHistogramsDf(unittest.TestCase):
def testIterRows(self):
run1 = {'benchmarkStart': 1234567890000, 'labels': ['run1'],
'benchmarks': ['system_health'], 'deviceIds': ['device1']}
# Second run on same device ten minutes later.
run2 = {'benchmarkStart': 1234567890000 + 600000, 'labels': ['run2'],
'benchmarks': ['system_health'], 'deviceIds': ['device1']}
hists = histogram_set.HistogramSet([
TestHistogram('startup', 'ms', [8, 10, 12], stories=['story1'],
traceUrls=['http://url/to/trace1'], **run1),
TestHistogram('memory', 'sizeInBytes', [256], stories=['story2'],
traceUrls=['http://url/to/trace2'], **run1),
TestHistogram('memory', 'sizeInBytes', [512], stories=['story2'],
traceUrls=['http://url/to/trace3'], **run2),
])
expected = [
('startup', 'ms', 10.0, 2.0, 3, 'run1', 'system_health',
'story1', '2009-02-13 23:31:30', 'device1', 'http://url/to/trace1'),
('memory', 'sizeInBytes', 256.0, 0.0, 1, 'run1', 'system_health',
'story2', '2009-02-13 23:31:30', 'device1', 'http://url/to/trace2'),
('memory', 'sizeInBytes', 512.0, 0.0, 1, 'run2', 'system_health',
'story2', '2009-02-13 23:41:30', 'device1', 'http://url/to/trace3'),
]
self.assertItemsEqual(histograms_df.IterRows(hists.AsDicts()), expected)
def testDataFrame(self):
run1 = {'benchmarkStart': 1234567890000, 'labels': ['run1'],
'benchmarks': ['system_health'], 'deviceIds': ['device1']}
# Second run on same device ten minutes later.
run2 = {'benchmarkStart': 1234567890000 + 600000, 'labels': ['run2'],
'benchmarks': ['system_health'], 'deviceIds': ['device1']}
hists = histogram_set.HistogramSet([
TestHistogram('startup', 'ms', [8, 10, 12], stories=['story1'],
traceUrls=['http://url/to/trace1'], **run1),
TestHistogram('memory', 'sizeInBytes', [256], stories=['story2'],
traceUrls=['http://url/to/trace2'], **run1),
TestHistogram('memory', 'sizeInBytes', [384], stories=['story2'],
traceUrls=['http://url/to/trace3'], **run2),
])
df = histograms_df.DataFrame(hists.AsDicts())
# Poke at the data frame and check a few known facts about our fake data:
# It has 3 histograms.
self.assertEqual(len(df), 3)
# The benchmark has two stories.
self.assertItemsEqual(df['story'].unique(), ['story1', 'story2'])
# We recorded three traces.
self.assertEqual(len(df['trace_url'].unique()), 3)
# All benchmarks ran on the same device.
self.assertEqual(len(df['device_id'].unique()), 1)
# There is a memory regression between runs 1 and 2.
memory = df.set_index(['name', 'run_label']).loc['memory']['mean']
self.assertEqual(memory['run2'] - memory['run1'], 128.0)
# Ten minutes passed between the two benchmark runs.
self.assertEqual(df['benchmark_start'].max() - df['benchmark_start'].min(),
pandas.Timedelta('10 minutes'))
| bsd-3-clause |
gertingold/scipy | scipy/special/_precompute/lambertw.py | 9 | 2043 | """Compute a Pade approximation for the principal branch of the
Lambert W function around 0 and compare it to various other
approximations.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
try:
import mpmath
import matplotlib.pyplot as plt
except ImportError:
pass
def lambertw_pade():
derivs = [mpmath.diff(mpmath.lambertw, 0, n=n) for n in range(6)]
p, q = mpmath.pade(derivs, 3, 2)
return p, q
def main():
print(__doc__)
with mpmath.workdps(50):
p, q = lambertw_pade()
p, q = p[::-1], q[::-1]
print("p = {}".format(p))
print("q = {}".format(q))
x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)
x, y = np.meshgrid(x, y)
z = x + 1j*y
lambertw_std = []
for z0 in z.flatten():
lambertw_std.append(complex(mpmath.lambertw(z0)))
lambertw_std = np.array(lambertw_std).reshape(x.shape)
fig, axes = plt.subplots(nrows=3, ncols=1)
# Compare Pade approximation to true result
p = np.array([float(p0) for p0 in p])
q = np.array([float(q0) for q0 in q])
pade_approx = np.polyval(p, z)/np.polyval(q, z)
pade_err = abs(pade_approx - lambertw_std)
axes[0].pcolormesh(x, y, pade_err)
# Compare two terms of asymptotic series to true result
asy_approx = np.log(z) - np.log(np.log(z))
asy_err = abs(asy_approx - lambertw_std)
axes[1].pcolormesh(x, y, asy_err)
# Compare two terms of the series around the branch point to the
# true result
p = np.sqrt(2*(np.exp(1)*z + 1))
series_approx = -1 + p - p**2/3
series_err = abs(series_approx - lambertw_std)
im = axes[2].pcolormesh(x, y, series_err)
fig.colorbar(im, ax=axes.ravel().tolist())
plt.show()
fig, ax = plt.subplots(nrows=1, ncols=1)
pade_better = pade_err < asy_err
im = ax.pcolormesh(x, y, pade_better)
t = np.linspace(-0.3, 0.3)
ax.plot(-2.5*abs(t) - 0.2, t, 'r')
fig.colorbar(im, ax=ax)
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
pkruskal/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
mojoboss/scikit-learn | sklearn/utils/multiclass.py | 92 | 13986 | # Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
import warnings
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_sequence_of_sequence(y):
if hasattr(y, '__array__'):
y = np.asarray(y)
return set(chain.from_iterable(y))
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-sequences': _unique_sequence_of_sequence,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1] for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %r" % ys)
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_label_indicator_matrix(y):
""" Check if ``y`` is in the label indicator matrix format (multilabel).
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a label indicator matrix format,
else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_label_indicator_matrix
>>> is_label_indicator_matrix([0, 1, 0, 1])
False
>>> is_label_indicator_matrix([[1], [0, 2], []])
False
>>> is_label_indicator_matrix(np.array([[1, 0], [0, 0]]))
True
>>> is_label_indicator_matrix(np.array([[1], [0], [0]]))
False
>>> is_label_indicator_matrix(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def is_sequence_of_sequences(y):
""" Check if ``y`` is in the sequence of sequences format (multilabel).
This format is DEPRECATED.
Parameters
----------
y : sequence or array.
Returns
-------
out : bool,
Return ``True``, if ``y`` is a sequence of sequences else ``False``.
"""
# the explicit check for ndarray is for forward compatibility; future
# versions of Numpy might want to register ndarray as a Sequence
try:
if hasattr(y, '__array__'):
y = np.asarray(y)
out = (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types))
except (IndexError, TypeError):
return False
if out:
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
return out
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples] or sequence of sequences
Target values. In the multilabel case the nested sequences can
have variable lengths.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
return is_label_indicator_matrix(y) or is_sequence_of_sequences(y)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-sequences': `y` is a sequence of sequences, a 1d
array-like of objects that are sequences of labels.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_sequence_of_sequences(y):
return 'multilabel-sequences'
elif is_label_indicator_matrix(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# known to fail in numpy 1.3 for array of arrays
return 'unknown'
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown'
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown'
elif y.ndim == 2 and y.shape[1] > 1:
suffix = '-multioutput'
else:
# column vector or 1d
suffix = ''
# check float and contains non-integer float values:
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
return 'continuous' + suffix
if len(np.unique(y)) <= 2:
assert not suffix, "2d binary array-like should be multilabel"
return 'binary'
else:
return 'multiclass' + suffix
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
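# Illustrative sketch (not part of the original module): ``class_distribution``
# on a small dense multioutput target. For the first column the labels are
# [0, 0, 1, 1], giving a prior of [0.5, 0.5]; for the second column [1, 2, 2, 2]
# gives [0.25, 0.75].
def _demo_class_distribution():
    y_demo = np.array([[0, 1],
                       [0, 2],
                       [1, 2],
                       [1, 2]])
    classes, n_classes, class_prior = class_distribution(y_demo)
    # classes     -> [array([0, 1]), array([1, 2])]
    # n_classes   -> [2, 2]
    # class_prior -> [array([0.5, 0.5]), array([0.25, 0.75])]
    return classes, n_classes, class_prior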
| bsd-3-clause |
wazeerzulfikar/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 51 | 12300 |
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0] + 1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
    # frequent as promised in the doc but the lowest most frequent. If this
    # test starts failing after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_array_equal(X, Xt)
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_equal(X.data, Xt.data)
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_equal(X.data, Xt.data)
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
PatrickOReilly/scikit-learn | sklearn/neighbors/classification.py | 4 | 14363 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable, optional (default = 'uniform')
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default = 'minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
        neighbors, neighbor `k+1` and `k`, have identical distances
        but different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
if weights is None:
mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
else:
mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
mode = np.asarray(mode.ravel(), dtype=np.intp)
y_pred[:, k] = classes_k.take(mode)
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_samples = X.shape[0]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(neigh_ind)
all_rows = np.arange(X.shape[0])
probabilities = []
for k, classes_k in enumerate(classes_):
pred_labels = _y[:, k][neigh_ind]
proba_k = np.zeros((n_samples, classes_k.size))
# a simple ':' index doesn't work right
for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors)
proba_k[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
probabilities.append(proba_k)
if not self.outputs_2d_:
probabilities = probabilities[0]
return probabilities
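# Illustrative sketch (not part of the original module): with
# ``weights='distance'`` the probabilities returned by ``predict_proba`` are
# normalized inverse-distance votes rather than plain neighbor counts.
def _demo_knn_distance_weighting():
    X_demo = [[0.0], [1.0], [2.0], [3.0]]
    y_demo = [0, 0, 1, 1]
    clf = KNeighborsClassifier(n_neighbors=3, weights='distance')
    clf.fit(X_demo, y_demo)
    # The query point 1.4 is nearest to x=1 (class 0), so class 0 should
    # receive the larger total weight.
    return clf.predict_proba([[1.4]])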
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label : int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, metric='minkowski',
outlier_label=None, metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of shape [n_samples] or [n_samples, n_outputs]
Class labels for each data sample.
"""
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
neigh_dist, neigh_ind = self.radius_neighbors(X)
inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
n_outputs = len(classes_)
if self.outlier_label is not None:
neigh_dist[outliers] = 1e-6
elif outliers:
raise ValueError('No neighbors found for test samples %r, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them from your dataset.'
% outliers)
weights = _get_weights(neigh_dist, self.weights)
y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
for k, classes_k in enumerate(classes_):
pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
dtype=object)
if weights is None:
mode = np.array([stats.mode(pl)[0]
for pl in pred_labels[inliers]], dtype=np.int)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w)
in zip(pred_labels[inliers], weights[inliers])],
dtype=np.int)
mode = mode.ravel()
y_pred[inliers, k] = classes_k.take(mode)
if outliers:
y_pred[outliers, :] = self.outlier_label
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
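# Illustrative sketch (not part of the original module): a query point with no
# training samples inside ``radius`` is assigned ``outlier_label`` instead of
# triggering the ValueError raised in ``predict`` above.
def _demo_radius_outlier_label():
    X_demo = [[0.0], [1.0], [2.0], [3.0]]
    y_demo = [0, 0, 1, 1]
    clf = RadiusNeighborsClassifier(radius=1.0, outlier_label=-1)
    clf.fit(X_demo, y_demo)
    # 10.0 has no neighbors within radius 1.0, so it should get the label -1.
    return clf.predict([[1.1], [10.0]])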
| bsd-3-clause |
RayMick/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/matplotlib/tests/test_widgets.py | 9 | 7945 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
try:
# mock in python 3.3+
from unittest import mock
except ImportError:
import mock
import matplotlib.widgets as widgets
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup
from numpy.testing import assert_allclose
def get_ax():
fig, ax = plt.subplots(1, 1)
ax.plot([0, 200], [0, 200])
ax.set_aspect(1.0)
ax.figure.canvas.draw()
return ax
def do_event(tool, etype, button=1, xdata=0, ydata=0, key=None, step=1):
"""
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events)
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
"""
event = mock.Mock()
event.button = button
ax = tool.ax
event.x, event.y = ax.transData.transform([(xdata, ydata),
(xdata, ydata)])[00]
event.xdata, event.ydata = xdata, ydata
event.inaxes = ax
event.canvas = ax.figure.canvas
event.key = key
event.step = step
event.guiEvent = None
event.name = 'Custom'
func = getattr(tool, etype)
func(event)
@cleanup
def check_rectangle(**kwargs):
ax = get_ax()
def onselect(epress, erelease):
ax._got_onselect = True
assert epress.xdata == 100
assert epress.ydata == 100
assert erelease.xdata == 199
assert erelease.ydata == 199
tool = widgets.RectangleSelector(ax, onselect, **kwargs)
do_event(tool, 'press', xdata=100, ydata=100, button=1)
do_event(tool, 'onmove', xdata=199, ydata=199, button=1)
# purposely drag outside of axis for release
do_event(tool, 'release', xdata=250, ydata=250, button=1)
if kwargs.get('drawtype', None) not in ['line', 'none']:
assert_allclose(tool.geometry,
[[100., 100, 199, 199, 100], [100, 199, 199, 100, 100]],
err_msg=tool.geometry)
assert ax._got_onselect
def test_rectangle_selector():
check_rectangle()
check_rectangle(drawtype='line', useblit=False)
check_rectangle(useblit=True, button=1)
check_rectangle(drawtype='none', minspanx=10, minspany=10)
check_rectangle(minspanx=10, minspany=10, spancoords='pixels')
check_rectangle(rectprops=dict(fill=True))
@cleanup
def test_ellipse():
"""For ellipse, test out the key modifiers"""
ax = get_ax()
def onselect(epress, erelease):
pass
tool = widgets.EllipseSelector(ax, onselect=onselect,
maxdist=10, interactive=True)
tool.extents = (100, 150, 100, 150)
# drag the rectangle
do_event(tool, 'press', xdata=10, ydata=10, button=1,
key=' ')
do_event(tool, 'onmove', xdata=30, ydata=30, button=1)
do_event(tool, 'release', xdata=30, ydata=30, button=1)
assert tool.extents == (120, 170, 120, 170), tool.extents
# create from center
do_event(tool, 'on_key_press', xdata=100, ydata=100, button=1,
key='control')
do_event(tool, 'press', xdata=100, ydata=100, button=1)
do_event(tool, 'onmove', xdata=125, ydata=125, button=1)
do_event(tool, 'release', xdata=125, ydata=125, button=1)
do_event(tool, 'on_key_release', xdata=100, ydata=100, button=1,
key='control')
assert tool.extents == (75, 125, 75, 125), tool.extents
# create a square
do_event(tool, 'on_key_press', xdata=10, ydata=10, button=1,
key='shift')
do_event(tool, 'press', xdata=10, ydata=10, button=1)
do_event(tool, 'onmove', xdata=35, ydata=30, button=1)
do_event(tool, 'release', xdata=35, ydata=30, button=1)
do_event(tool, 'on_key_release', xdata=10, ydata=10, button=1,
key='shift')
extents = [int(e) for e in tool.extents]
assert extents == [10, 35, 10, 34]
# create a square from center
do_event(tool, 'on_key_press', xdata=100, ydata=100, button=1,
key='ctrl+shift')
do_event(tool, 'press', xdata=100, ydata=100, button=1)
do_event(tool, 'onmove', xdata=125, ydata=130, button=1)
do_event(tool, 'release', xdata=125, ydata=130, button=1)
do_event(tool, 'on_key_release', xdata=100, ydata=100, button=1,
key='ctrl+shift')
extents = [int(e) for e in tool.extents]
assert extents == [70, 129, 70, 130], extents
assert tool.geometry.shape == (2, 73)
assert_allclose(tool.geometry[:, 0], [70., 100])
@cleanup
def test_rectangle_handles():
ax = get_ax()
def onselect(epress, erelease):
pass
tool = widgets.RectangleSelector(ax, onselect=onselect,
maxdist=10, interactive=True)
tool.extents = (100, 150, 100, 150)
assert tool.corners == (
(100, 150, 150, 100), (100, 100, 150, 150))
assert tool.extents == (100, 150, 100, 150)
assert tool.edge_centers == (
(100, 125.0, 150, 125.0), (125.0, 100, 125.0, 150))
assert tool.extents == (100, 150, 100, 150)
# grab a corner and move it
do_event(tool, 'press', xdata=100, ydata=100)
do_event(tool, 'onmove', xdata=120, ydata=120)
do_event(tool, 'release', xdata=120, ydata=120)
assert tool.extents == (120, 150, 120, 150)
# grab the center and move it
do_event(tool, 'press', xdata=132, ydata=132)
do_event(tool, 'onmove', xdata=120, ydata=120)
do_event(tool, 'release', xdata=120, ydata=120)
assert tool.extents == (108, 138, 108, 138)
# create a new rectangle
do_event(tool, 'press', xdata=10, ydata=10)
do_event(tool, 'onmove', xdata=100, ydata=100)
do_event(tool, 'release', xdata=100, ydata=100)
assert tool.extents == (10, 100, 10, 100)
@cleanup
def check_span(*args, **kwargs):
ax = get_ax()
def onselect(vmin, vmax):
ax._got_onselect = True
assert vmin == 100
assert vmax == 150
def onmove(vmin, vmax):
assert vmin == 100
assert vmax == 125
ax._got_on_move = True
if 'onmove_callback' in kwargs:
kwargs['onmove_callback'] = onmove
tool = widgets.SpanSelector(ax, onselect, *args, **kwargs)
do_event(tool, 'press', xdata=100, ydata=100, button=1)
do_event(tool, 'onmove', xdata=125, ydata=125, button=1)
do_event(tool, 'release', xdata=150, ydata=150, button=1)
assert ax._got_onselect
if 'onmove_callback' in kwargs:
assert ax._got_on_move
def test_span_selector():
check_span('horizontal', minspan=10, useblit=True)
check_span('vertical', onmove_callback=True, button=1)
check_span('horizontal', rectprops=dict(fill=True))
@cleanup
def check_lasso_selector(**kwargs):
ax = get_ax()
def onselect(verts):
ax._got_onselect = True
assert verts == [(100, 100), (125, 125), (150, 150)]
tool = widgets.LassoSelector(ax, onselect, **kwargs)
do_event(tool, 'press', xdata=100, ydata=100, button=1)
do_event(tool, 'onmove', xdata=125, ydata=125, button=1)
do_event(tool, 'release', xdata=150, ydata=150, button=1)
assert ax._got_onselect
def test_lasso_selector():
check_lasso_selector()
check_lasso_selector(useblit=False, lineprops=dict(color='red'))
check_lasso_selector(useblit=True, button=1)
| gpl-3.0 |
AstroVPK/kali | python/kali/kepler.py | 2 | 4984 | import math as math
import numpy as np
import urllib
import os as os
import sys as sys
import warnings
import fitsio
from fitsio import FITS, FITSHDR
import subprocess
import argparse
import pdb
from astropy import units
from astropy.coordinates import SkyCoord
try:
os.environ['DISPLAY']
except KeyError as Err:
warnings.warn('No display environment! Using matplotlib backend "Agg"')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
try:
import kali.lc
except ImportError:
print('kali is not setup. Setup kali by sourcing bin/setup.sh')
sys.exit(1)
class keplerLC(kali.lc.lc):
window = 5
Day = 86164.090530833
integrationTime = 6.019802903
readTime = 0.5189485261
numIntegrationsSC = 9
numIntegrationsLC = 270
samplingIntervalSC = (integrationTime + readTime)*numIntegrationsSC/Day
samplingIntervalLC = (integrationTime + readTime)*numIntegrationsLC/Day
def _estimate_deltaT(self):
self.deltaT = self.samplingIntervalLC/(1.0 + self.z)
def _ingest_raw_lc(self):
self.pre_t = np.array(self.numPts*[0.0])
self.pre_y = np.array(self.numPts*[0.0])
self.pre_mask = np.array(self.numPts*[0.0])
for i in range(self.numPts):
self.pre_t[i] = i*self.deltaT
for i in range(self.rawNumPts):
t_idx = int(round(self.rawData[i, 0]/self.deltaT))
self.pre_t[t_idx] = self.rawData[i, 0]
self.pre_y[t_idx] = self.rawData[i, 1]
self.pre_mask[t_idx] = 1.0
for i in range(self.numPts):
if self.pre_mask[i] == 0.0:
self.pre_y[i] = np.nan
def _estimate_lc(self, window=5):
for pt in range(self.numCadences):
self.t[pt] = self.pre_t[pt+self.window]
self.y[pt] = self.pre_y[pt+self.window]
self.yerr[pt] = np.nanstd(self.pre_y[pt: pt+2*self.window+1])
self.mask[pt] = self.pre_mask[pt+self.window]
def read(self, name, band=None, path=None, ancillary=None, **kwargs):
self.z = kwargs.get('z', 0.0)
fileName = 'lcout_' + name + '.dat'
if path is None:
try:
self.path = os.environ['KEPLERDATADIR']
except KeyError:
raise KeyError('Environment variable "KEPLERDATADIR" not set! Please set "KEPLERDATADIR" to point \
where all KEPLER data lives first...')
else:
self.path = path
filePath = os.path.join(self.path, fileName)
self._name = str(name) # The name of the light curve (usually the object's name).
self._band = str(r'Kep') # The name of the photometric band (eg. HSC-I or SDSS-g etc..).
self._xunit = r'$t$~(MJD)' # Unit in which time is measured (eg. s, sec, seconds etc...).
self._yunit = r'$F$~($\mathrm{e^{-}}$)' # Unit in which the flux is measured (eg Wm^{-2} etc...).
self.rawData = np.loadtxt(filePath)
self.rawNumPts = self.rawData.shape[0]
self._estimate_deltaT()
self.numPts = int(math.ceil((self.rawData[-1, 0] - self.rawData[0, 0])/self.deltaT)) + 1
self.numCadences = self.numPts - 2*self.window
self._ingest_raw_lc()
self.t = np.array(self.numCadences*[0.0])
self.x = np.array(self.numCadences*[0.0])
self.y = np.array(self.numCadences*[0.0])
self.yerr = np.array(self.numCadences*[0.0])
self.mask = np.array(self.numCadences*[0.0])
self._estimate_lc()
self.t = np.require(self.t, requirements=['F', 'A', 'W', 'O', 'E'])
self.x = np.require(self.x, requirements=['F', 'A', 'W', 'O', 'E'])
self.y = np.require(self.y, requirements=['F', 'A', 'W', 'O', 'E'])
self.yerr = np.require(self.yerr, requirements=['F', 'A', 'W', 'O', 'E'])
self.mask = np.require(self.mask, requirements=['F', 'A', 'W', 'O', 'E'])
def write(self, name, path=None, **kwrags):
pass
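# Illustrative sketch (not part of the original class): the per-point error bars
# in ``_estimate_lc`` come from a rolling standard deviation over a
# (2*window + 1)-point slice of the regularized light curve. The helper below
# reproduces that idea on an arbitrary array; the name is hypothetical.
def _demo_rolling_yerr(pre_y, window=5):
    n = len(pre_y) - 2*window
    yerr = np.empty(n)
    for pt in range(n):
        # Same slice convention as keplerLC._estimate_lc above.
        yerr[pt] = np.nanstd(pre_y[pt: pt + 2*window + 1])
    return yerr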
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-id', '--ID', type=str, default='211991001', help=r'EPIC ID')
parser.add_argument('-z', '--z', type=float, default='0.3056', help=r'object redshift')
parser.add_argument('-p', '--processing', type=str,
default='k2sff', help=r'sap/pdcsap/k2sff/k2sc/k2varcat etc...')
parser.add_argument('-c', '--campaign', type=str, default='c05', help=r'Campaign')
parser.add_argument('-goid', '--goID', type=str,
default='Edelson, Wehrle, Carini, Olling', help=r'Guest Observer ID')
parser.add_argument('-gopi', '--goPI', type=str,
default='GO5038, GO5053, GO5056, GO5096', help=r'Guest Observer PI')
args = parser.parse_args()
    LC = keplerLC(name=args.ID, band='Kep', z=args.z, processing=args.processing,
campaign=args.campaign, goid=args.goID, gopi=args.goPI)
LC.plot()
LC.plotacf()
LC.plotsf()
plt.show()
| gpl-2.0 |
rgommers/scipy | scipy/ndimage/filters.py | 12 | 55835 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections.abc import Iterable
import warnings
import numpy
import operator
from numpy.core.multiarray import normalize_axis_index
from . import _ni_support
from . import _nd_image
from . import _ni_docstrings
__all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter',
'prewitt', 'sobel', 'generic_laplace', 'laplace',
'gaussian_laplace', 'generic_gradient_magnitude',
'gaussian_gradient_magnitude', 'correlate', 'convolve',
'uniform_filter1d', 'uniform_filter', 'minimum_filter1d',
'maximum_filter1d', 'minimum_filter', 'maximum_filter',
'rank_filter', 'median_filter', 'percentile_filter',
'generic_filter1d', 'generic_filter']
def _invalid_origin(origin, lenw):
return (origin < -(lenw // 2)) or (origin > (lenw - 1) // 2)
def _complex_via_real_components(func, input, weights, output, cval, **kwargs):
"""Complex convolution via a linear combination of real convolutions."""
complex_input = input.dtype.kind == 'c'
complex_weights = weights.dtype.kind == 'c'
if complex_input and complex_weights:
# real component of the output
func(input.real, weights.real, output=output.real,
cval=numpy.real(cval), **kwargs)
output.real -= func(input.imag, weights.imag, output=None,
cval=numpy.imag(cval), **kwargs)
# imaginary component of the output
func(input.real, weights.imag, output=output.imag,
cval=numpy.real(cval), **kwargs)
output.imag += func(input.imag, weights.real, output=None,
cval=numpy.imag(cval), **kwargs)
elif complex_input:
func(input.real, weights, output=output.real, cval=numpy.real(cval),
**kwargs)
func(input.imag, weights, output=output.imag, cval=numpy.imag(cval),
**kwargs)
else:
if numpy.iscomplexobj(cval):
raise ValueError("Cannot provide a complex-valued cval when the "
"input is real.")
func(input, weights.real, output=output.real, cval=cval, **kwargs)
func(input, weights.imag, output=output.imag, cval=cval, **kwargs)
return output
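# Illustrative sketch (not part of the original module): the linear combination
# used above mirrors ordinary complex multiplication,
# (a + ib)(c + id) = (ac - bd) + i(ad + bc),
# so one complex-valued filter call can be assembled from four real-valued ones.
def _demo_complex_decomposition(a, w):
    a = numpy.asarray(a, dtype=complex)
    w = numpy.asarray(w, dtype=complex)
    real_part = a.real * w.real - a.imag * w.imag
    imag_part = a.real * w.imag + a.imag * w.real
    # Identical to the direct complex product.
    return numpy.allclose(real_part + 1j * imag_part, a * w)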
@_ni_docstrings.docfiller
def correlate1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a 1-D correlation along the given axis.
The lines of the array along the given axis are correlated with the
given weights.
Parameters
----------
%(input)s
weights : array
1-D sequence of numbers.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import correlate1d
>>> correlate1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([ 8, 26, 8, 12, 7, 28, 36, 9])
"""
input = numpy.asarray(input)
weights = numpy.asarray(weights)
complex_input = input.dtype.kind == 'c'
complex_weights = weights.dtype.kind == 'c'
if complex_input or complex_weights:
if complex_weights:
weights = weights.conj()
weights = weights.astype(numpy.complex128, copy=False)
kwargs = dict(axis=axis, mode=mode, origin=origin)
output = _ni_support._get_output(output, input, complex_output=True)
return _complex_via_real_components(correlate1d, input, weights,
output, cval, **kwargs)
output = _ni_support._get_output(output, input)
weights = numpy.asarray(weights, dtype=numpy.float64)
if weights.ndim != 1 or weights.shape[0] < 1:
raise RuntimeError('no filter weights given')
if not weights.flags.contiguous:
weights = weights.copy()
axis = normalize_axis_index(axis, input.ndim)
if _invalid_origin(origin, len(weights)):
raise ValueError('Invalid origin; origin must satisfy '
'-(len(weights) // 2) <= origin <= '
'(len(weights)-1) // 2')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate1d(input, weights, axis, output, mode, cval,
origin)
return output
@_ni_docstrings.docfiller
def convolve1d(input, weights, axis=-1, output=None, mode="reflect",
cval=0.0, origin=0):
"""Calculate a 1-D convolution along the given axis.
The lines of the array along the given axis are convolved with the
given weights.
Parameters
----------
%(input)s
weights : ndarray
1-D sequence of numbers.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Returns
-------
convolve1d : ndarray
Convolved array with same shape as input
Examples
--------
>>> from scipy.ndimage import convolve1d
>>> convolve1d([2, 8, 0, 4, 1, 9, 9, 0], weights=[1, 3])
array([14, 24, 4, 13, 12, 36, 27, 0])
"""
weights = weights[::-1]
origin = -origin
if not len(weights) & 1:
origin -= 1
weights = numpy.asarray(weights)
if weights.dtype.kind == 'c':
# pre-conjugate here to counteract the conjugation in correlate1d
weights = weights.conj()
return correlate1d(input, weights, axis, output, mode, cval, origin)
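# Illustrative sketch (not part of the original module): for an odd-length,
# real-valued kernel with the default origin, ``convolve1d`` is equivalent to
# ``correlate1d`` with the weights reversed, which is exactly how it is
# implemented above.
def _demo_convolve1d_vs_correlate1d():
    x = numpy.array([2.0, 8.0, 0.0, 4.0, 1.0, 9.0, 9.0, 0.0])
    w = numpy.array([1.0, 3.0, 0.5])
    return numpy.allclose(convolve1d(x, w), correlate1d(x, w[::-1]))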
def _gaussian_kernel1d(sigma, order, radius):
"""
Computes a 1-D Gaussian convolution kernel.
"""
if order < 0:
raise ValueError('order must be non-negative')
exponent_range = numpy.arange(order + 1)
sigma2 = sigma * sigma
x = numpy.arange(-radius, radius+1)
phi_x = numpy.exp(-0.5 / sigma2 * x ** 2)
phi_x = phi_x / phi_x.sum()
if order == 0:
return phi_x
else:
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
# p'(x) = -1 / sigma ** 2
# Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
# coefficients of q(x)
q = numpy.zeros(order + 1)
q[0] = 1
D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
P = numpy.diag(numpy.ones(order)/-sigma2, -1) # P @ q(x) = q(x) * p'(x)
Q_deriv = D + P
for _ in range(order):
q = Q_deriv.dot(q)
q = (x[:, None] ** exponent_range).dot(q)
return q * phi_x
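# Illustrative sketch (not part of the original module): the order-0 kernel
# returned above is a normalized, symmetric Gaussian sampled on
# [-radius, radius], so its weights sum to 1.
def _demo_gaussian_kernel1d():
    k = _gaussian_kernel1d(sigma=2.0, order=0, radius=8)
    return numpy.isclose(k.sum(), 1.0) and numpy.allclose(k, k[::-1])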
@_ni_docstrings.docfiller
def gaussian_filter1d(input, sigma, axis=-1, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""1-D Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar
standard deviation for Gaussian kernel
%(axis)s
order : int, optional
An order of 0 corresponds to convolution with a Gaussian
kernel. A positive order corresponds to convolution with
that derivative of a Gaussian.
%(output)s
%(mode_reflect)s
%(cval)s
truncate : float, optional
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter1d : ndarray
Examples
--------
>>> from scipy.ndimage import gaussian_filter1d
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 1)
array([ 1.42704095, 2.06782203, 3. , 3.93217797, 4.57295905])
>>> gaussian_filter1d([1.0, 2.0, 3.0, 4.0, 5.0], 4)
array([ 2.91948343, 2.95023502, 3. , 3.04976498, 3.08051657])
>>> import matplotlib.pyplot as plt
>>> rng = np.random.default_rng()
>>> x = rng.standard_normal(101).cumsum()
>>> y3 = gaussian_filter1d(x, 3)
>>> y6 = gaussian_filter1d(x, 6)
>>> plt.plot(x, 'k', label='original data')
>>> plt.plot(y3, '--', label='filtered, sigma=3')
>>> plt.plot(y6, ':', label='filtered, sigma=6')
>>> plt.legend()
>>> plt.grid()
>>> plt.show()
"""
sd = float(sigma)
# make the radius of the filter equal to truncate standard deviations
lw = int(truncate * sd + 0.5)
# Since we are calling correlate, not convolve, revert the kernel
weights = _gaussian_kernel1d(sigma, order, lw)[::-1]
return correlate1d(input, weights, axis, output, mode, cval, 0)
@_ni_docstrings.docfiller
def gaussian_filter(input, sigma, order=0, output=None,
mode="reflect", cval=0.0, truncate=4.0):
"""Multidimensional Gaussian filter.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
order : int or sequence of ints, optional
The order of the filter along each axis is given as a sequence
of integers, or as a single number. An order of 0 corresponds
to convolution with a Gaussian kernel. A positive order
corresponds to convolution with that derivative of a Gaussian.
%(output)s
%(mode_multiple)s
%(cval)s
truncate : float
Truncate the filter at this many standard deviations.
Default is 4.0.
Returns
-------
gaussian_filter : ndarray
Returned array of same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
1-D convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> from scipy.ndimage import gaussian_filter
>>> a = np.arange(50, step=2).reshape((5,5))
>>> a
array([[ 0, 2, 4, 6, 8],
[10, 12, 14, 16, 18],
[20, 22, 24, 26, 28],
[30, 32, 34, 36, 38],
[40, 42, 44, 46, 48]])
>>> gaussian_filter(a, sigma=1)
array([[ 4, 6, 8, 9, 11],
[10, 12, 14, 15, 17],
[20, 22, 24, 25, 27],
[29, 31, 33, 34, 36],
[35, 37, 39, 40, 42]])
>>> from scipy import misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = gaussian_filter(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
orders = _ni_support._normalize_sequence(order, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sigmas[ii], orders[ii], modes[ii])
for ii in range(len(axes)) if sigmas[ii] > 1e-15]
if len(axes) > 0:
for axis, sigma, order, mode in axes:
gaussian_filter1d(input, sigma, axis, order, output,
mode, cval, truncate)
input = output
else:
output[...] = input[...]
return output
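# Illustrative sketch (not part of the original module): because the Gaussian
# filter is separable, applying ``gaussian_filter`` to a 2-D float array matches
# applying ``gaussian_filter1d`` along each axis in turn (up to floating-point
# noise).
def _demo_gaussian_separability():
    a = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
    direct = gaussian_filter(a, sigma=1.0)
    by_axis = gaussian_filter1d(gaussian_filter1d(a, 1.0, axis=0), 1.0, axis=1)
    return numpy.allclose(direct, by_axis)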
@_ni_docstrings.docfiller
def prewitt(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Prewitt filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.prewitt(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = normalize_axis_index(axis, input.ndim)
output = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 1, 1], ii, output, modes[ii], cval, 0,)
return output
@_ni_docstrings.docfiller
def sobel(input, axis=-1, output=None, mode="reflect", cval=0.0):
"""Calculate a Sobel filter.
Parameters
----------
%(input)s
%(axis)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.sobel(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
axis = normalize_axis_index(axis, input.ndim)
output = _ni_support._get_output(output, input)
modes = _ni_support._normalize_sequence(mode, input.ndim)
correlate1d(input, [-1, 0, 1], axis, output, modes[axis], cval, 0)
axes = [ii for ii in range(input.ndim) if ii != axis]
for ii in axes:
correlate1d(output, [1, 2, 1], ii, output, modes[ii], cval, 0)
return output
@_ni_docstrings.docfiller
def generic_laplace(input, derivative2, output=None, mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords=None):
"""
N-D Laplace filter using a provided second derivative function.
Parameters
----------
%(input)s
derivative2 : callable
Callable with the following signature::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative2(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
for ii in range(1, len(axes)):
tmp = derivative2(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
output += tmp
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def laplace(input, output=None, mode="reflect", cval=0.0):
"""N-D Laplace filter based on approximate second derivatives.
Parameters
----------
%(input)s
%(output)s
%(mode_multiple)s
%(cval)s
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.laplace(ascent)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
def derivative2(input, axis, output, mode, cval):
return correlate1d(input, [1, -2, 1], axis, output, mode, cval, 0)
return generic_laplace(input, derivative2, output, mode, cval)
@_ni_docstrings.docfiller
def gaussian_laplace(input, sigma, output=None, mode="reflect",
cval=0.0, **kwargs):
"""Multidimensional Laplace filter using Gaussian second derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> ascent = misc.ascent()
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> result = ndimage.gaussian_laplace(ascent, sigma=1)
>>> ax1.imshow(result)
>>> result = ndimage.gaussian_laplace(ascent, sigma=3)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative2(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(input, sigma, order, output, mode, cval,
**kwargs)
return generic_laplace(input, derivative2, output, mode, cval,
extra_arguments=(sigma,),
extra_keywords=kwargs)
@_ni_docstrings.docfiller
def generic_gradient_magnitude(input, derivative, output=None,
mode="reflect", cval=0.0,
extra_arguments=(), extra_keywords=None):
"""Gradient magnitude using a provided gradient function.
Parameters
----------
%(input)s
derivative : callable
Callable with the following signature::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
See `extra_arguments`, `extra_keywords` below.
`derivative` can assume that `input` and `output` are ndarrays.
Note that the output from `derivative` is modified inplace;
be careful to copy important inputs before returning them.
%(output)s
%(mode_multiple)s
%(cval)s
%(extra_keywords)s
%(extra_arguments)s
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
output = _ni_support._get_output(output, input)
axes = list(range(input.ndim))
if len(axes) > 0:
modes = _ni_support._normalize_sequence(mode, len(axes))
derivative(input, axes[0], output, modes[0], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(output, output, output)
for ii in range(1, len(axes)):
tmp = derivative(input, axes[ii], output.dtype, modes[ii], cval,
*extra_arguments, **extra_keywords)
numpy.multiply(tmp, tmp, tmp)
output += tmp
# This allows the sqrt to work with a different default casting
numpy.sqrt(output, output, casting='unsafe')
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def gaussian_gradient_magnitude(input, sigma, output=None,
mode="reflect", cval=0.0, **kwargs):
"""Multidimensional gradient magnitude using Gaussian derivatives.
Parameters
----------
%(input)s
sigma : scalar or sequence of scalars
The standard deviations of the Gaussian filter are given for
each axis as a sequence, or as a single number, in which case
it is equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
Extra keyword arguments will be passed to gaussian_filter().
Returns
-------
gaussian_gradient_magnitude : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
def derivative(input, axis, output, mode, cval, sigma, **kwargs):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(input, sigma, order, output, mode,
cval, **kwargs)
return generic_gradient_magnitude(input, derivative, output, mode,
cval, extra_arguments=(sigma,),
extra_keywords=kwargs)
def _correlate_or_convolve(input, weights, output, mode, cval, origin,
convolution):
input = numpy.asarray(input)
weights = numpy.asarray(weights)
complex_input = input.dtype.kind == 'c'
complex_weights = weights.dtype.kind == 'c'
if complex_input or complex_weights:
if complex_weights and not convolution:
# As for numpy.correlate, conjugate weights rather than input.
weights = weights.conj()
kwargs = dict(
mode=mode, origin=origin, convolution=convolution
)
output = _ni_support._get_output(output, input, complex_output=True)
return _complex_via_real_components(_correlate_or_convolve, input,
weights, output, cval, **kwargs)
origins = _ni_support._normalize_sequence(origin, input.ndim)
weights = numpy.asarray(weights, dtype=numpy.float64)
wshape = [ii for ii in weights.shape if ii > 0]
if len(wshape) != input.ndim:
raise RuntimeError('filter weights array has incorrect shape.')
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
for ii in range(len(origins)):
origins[ii] = -origins[ii]
if not weights.shape[ii] & 1:
origins[ii] -= 1
for origin, lenw in zip(origins, wshape):
if _invalid_origin(origin, lenw):
raise ValueError('Invalid origin; origin must satisfy '
'-(weights.shape[k] // 2) <= origin[k] <= '
'(weights.shape[k]-1) // 2')
if not weights.flags.contiguous:
weights = weights.copy()
output = _ni_support._get_output(output, input)
temp_needed = numpy.may_share_memory(input, output)
if temp_needed:
# input and output arrays cannot share memory
temp = output
output = _ni_support._get_output(output.dtype, input)
if not isinstance(mode, str) and isinstance(mode, Iterable):
raise RuntimeError("A sequence of modes is not supported")
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.correlate(input, weights, output, mode, cval, origins)
if temp_needed:
temp[...] = output
output = temp
return output
@_ni_docstrings.docfiller
def correlate(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional correlation.
The array is correlated with the given kernel.
Parameters
----------
%(input)s
weights : ndarray
array of weights, same number of dimensions as input
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
result : ndarray
The result of correlation of `input` with `weights`.
See Also
--------
convolve : Convolve an image with a kernel.
Examples
--------
    Correlation is the process of moving a filter mask, often referred to as
    a kernel, over the image and computing the sum of products at each location.
>>> from scipy.ndimage import correlate
>>> input_img = np.arange(25).reshape(5,5)
>>> print(input_img)
[[ 0 1 2 3 4]
[ 5 6 7 8 9]
[10 11 12 13 14]
[15 16 17 18 19]
[20 21 22 23 24]]
    Define a kernel (weights) for correlation. In this example, it sums the
    center element and its four immediate neighbours (up, down, left and right).
>>> weights = [[0, 1, 0],
... [1, 1, 1],
... [0, 1, 0]]
We can calculate a correlation result:
For example, element ``[2,2]`` is ``7 + 11 + 12 + 13 + 17 = 60``.
>>> correlate(input_img, weights)
array([[ 6, 10, 15, 20, 24],
[ 26, 30, 35, 40, 44],
[ 51, 55, 60, 65, 69],
[ 76, 80, 85, 90, 94],
[ 96, 100, 105, 110, 114]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, False)
@_ni_docstrings.docfiller
def convolve(input, weights, output=None, mode='reflect', cval=0.0,
origin=0):
"""
Multidimensional convolution.
The array is convolved with the given kernel.
Parameters
----------
%(input)s
weights : array_like
Array of weights, same number of dimensions as input
%(output)s
%(mode_reflect)s
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0
%(origin_multiple)s
Returns
-------
result : ndarray
The result of convolution of `input` with `weights`.
See Also
--------
correlate : Correlate an image with a kernel.
Notes
-----
Each value in result is :math:`C_i = \\sum_j{I_{i+k-j} W_j}`, where
W is the `weights` kernel,
j is the N-D spatial index over :math:`W`,
I is the `input` and k is the coordinate of the center of
W, specified by `origin` in the input parameters.
Examples
--------
Perhaps the simplest case to understand is ``mode='constant', cval=0.0``,
because in this case borders (i.e., where the `weights` kernel, centered
on any one value, extends beyond an edge of `input`) are treated as zeros.
>>> a = np.array([[1, 2, 0, 0],
... [5, 3, 0, 4],
... [0, 0, 0, 7],
... [9, 3, 0, 0]])
>>> k = np.array([[1,1,1],[1,1,0],[1,0,0]])
>>> from scipy import ndimage
>>> ndimage.convolve(a, k, mode='constant', cval=0.0)
array([[11, 10, 7, 4],
[10, 3, 11, 11],
[15, 12, 14, 7],
[12, 3, 7, 0]])
Setting ``cval=1.0`` is equivalent to padding the outer edge of `input`
with 1.0's (and then extracting only the original region of the result).
>>> ndimage.convolve(a, k, mode='constant', cval=1.0)
array([[13, 11, 8, 7],
[11, 3, 11, 14],
[16, 12, 14, 10],
[15, 6, 10, 5]])
With ``mode='reflect'`` (the default), outer values are reflected at the
edge of `input` to fill in missing values.
>>> b = np.array([[2, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0,1,0], [0,1,0], [0,1,0]])
>>> ndimage.convolve(b, k, mode='reflect')
array([[5, 0, 0],
[3, 0, 0],
[1, 0, 0]])
This includes diagonally at the corners.
>>> k = np.array([[1,0,0],[0,1,0],[0,0,1]])
>>> ndimage.convolve(b, k)
array([[4, 2, 0],
[3, 2, 0],
[1, 1, 0]])
    With ``mode='nearest'``, the single value nearest an edge of
`input` is repeated as many times as needed to match the overlapping
`weights`.
>>> c = np.array([[2, 0, 1],
... [1, 0, 0],
... [0, 0, 0]])
>>> k = np.array([[0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0],
... [0, 1, 0]])
>>> ndimage.convolve(c, k, mode='nearest')
array([[7, 0, 3],
[5, 0, 2],
[3, 0, 1]])
"""
return _correlate_or_convolve(input, weights, output, mode, cval,
origin, True)
@_ni_docstrings.docfiller
def uniform_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a 1-D uniform filter along the given axis.
The lines of the array along the given axis are filtered with a
uniform filter of given size.
Parameters
----------
%(input)s
size : int
length of uniform filter
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Examples
--------
>>> from scipy.ndimage import uniform_filter1d
>>> uniform_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([4, 3, 4, 1, 4, 6, 6, 3])
"""
input = numpy.asarray(input)
axis = normalize_axis_index(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
complex_output = input.dtype.kind == 'c'
output = _ni_support._get_output(output, input,
complex_output=complex_output)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
if not complex_output:
_nd_image.uniform_filter1d(input, size, axis, output, mode, cval,
origin)
else:
_nd_image.uniform_filter1d(input.real, size, axis, output.real, mode,
numpy.real(cval), origin)
_nd_image.uniform_filter1d(input.imag, size, axis, output.imag, mode,
numpy.imag(cval), origin)
return output
@_ni_docstrings.docfiller
def uniform_filter(input, size=3, output=None, mode="reflect",
cval=0.0, origin=0):
"""Multidimensional uniform filter.
Parameters
----------
%(input)s
size : int or sequence of ints, optional
The sizes of the uniform filter are given for each axis as a
sequence, or as a single number, in which case the size is
equal for all axes.
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
uniform_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
The multidimensional filter is implemented as a sequence of
1-D uniform filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.uniform_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
input = numpy.asarray(input)
output = _ni_support._get_output(output, input,
complex_output=input.dtype.kind == 'c')
sizes = _ni_support._normalize_sequence(size, input.ndim)
origins = _ni_support._normalize_sequence(origin, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if len(axes) > 0:
for axis, size, origin, mode in axes:
uniform_filter1d(input, int(size), axis, output, mode,
cval, origin)
input = output
else:
output[...] = input[...]
return output
@_ni_docstrings.docfiller
def minimum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a 1-D minimum filter along the given axis.
The lines of the array along the given axis are filtered with a
minimum filter of given size.
Parameters
----------
%(input)s
size : int
length along which to calculate 1D minimum
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Notes
-----
This function implements the MINLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import minimum_filter1d
>>> minimum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([2, 0, 0, 0, 1, 1, 0, 0])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = normalize_axis_index(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 1)
return output
@_ni_docstrings.docfiller
def maximum_filter1d(input, size, axis=-1, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a 1-D maximum filter along the given axis.
The lines of the array along the given axis are filtered with a
maximum filter of given size.
Parameters
----------
%(input)s
size : int
Length along which to calculate the 1-D maximum.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
Returns
-------
maximum1d : ndarray, None
Maximum-filtered array with same shape as input.
None if `output` is not None
Notes
-----
This function implements the MAXLIST algorithm [1]_, as described by
Richard Harter [2]_, and has a guaranteed O(n) performance, `n` being
the `input` length, regardless of filter size.
References
----------
.. [1] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.42.2777
.. [2] http://www.richardhartersworld.com/cri/2001/slidingmin.html
Examples
--------
>>> from scipy.ndimage import maximum_filter1d
>>> maximum_filter1d([2, 8, 0, 4, 1, 9, 9, 0], size=3)
array([8, 8, 8, 4, 9, 9, 9, 9])
"""
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
axis = normalize_axis_index(axis, input.ndim)
if size < 1:
raise RuntimeError('incorrect filter size')
output = _ni_support._get_output(output, input)
if (size // 2 + origin < 0) or (size // 2 + origin >= size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter1d(input, size, axis, output, mode, cval,
origin, 0)
return output
def _min_or_max_filter(input, size, footprint, structure, output, mode,
cval, origin, minimum):
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
if structure is None:
if footprint is None:
if size is None:
raise RuntimeError("no footprint provided")
separable = True
else:
footprint = numpy.asarray(footprint, dtype=bool)
if not footprint.any():
raise ValueError("All-zero footprint is not supported.")
if footprint.all():
size = footprint.shape
footprint = None
separable = True
else:
separable = False
else:
structure = numpy.asarray(structure, dtype=numpy.float64)
separable = False
if footprint is None:
footprint = numpy.ones(structure.shape, bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
temp_needed = numpy.may_share_memory(input, output)
if temp_needed:
# input and output arrays cannot share memory
temp = output
output = _ni_support._get_output(output.dtype, input)
origins = _ni_support._normalize_sequence(origin, input.ndim)
if separable:
sizes = _ni_support._normalize_sequence(size, input.ndim)
modes = _ni_support._normalize_sequence(mode, input.ndim)
axes = list(range(input.ndim))
axes = [(axes[ii], sizes[ii], origins[ii], modes[ii])
for ii in range(len(axes)) if sizes[ii] > 1]
if minimum:
filter_ = minimum_filter1d
else:
filter_ = maximum_filter1d
if len(axes) > 0:
for axis, size, origin, mode in axes:
filter_(input, int(size), axis, output, mode, cval, origin)
input = output
else:
output[...] = input[...]
else:
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
if structure is not None:
if len(structure.shape) != input.ndim:
raise RuntimeError('structure array has incorrect shape')
if not structure.flags.contiguous:
structure = structure.copy()
if not isinstance(mode, str) and isinstance(mode, Iterable):
raise RuntimeError(
"A sequence of modes is not supported for non-separable "
"footprints")
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.min_or_max_filter(input, footprint, structure, output,
mode, cval, origins, minimum)
if temp_needed:
temp[...] = output
output = temp
return output
@_ni_docstrings.docfiller
def minimum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional minimum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
minimum_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
A sequence of modes (one per axis) is only supported when the footprint is
separable. Otherwise, a single mode string must be provided.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.minimum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 1)
@_ni_docstrings.docfiller
def maximum_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional maximum filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_multiple)s
%(cval)s
%(origin_multiple)s
Returns
-------
maximum_filter : ndarray
Filtered array. Has the same shape as `input`.
Notes
-----
A sequence of modes (one per axis) is only supported when the footprint is
separable. Otherwise, a single mode string must be provided.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.maximum_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _min_or_max_filter(input, size, footprint, None, output, mode,
cval, origin, 0)
@_ni_docstrings.docfiller
def _rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0, operation='rank'):
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=3)
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
filter_size = numpy.where(footprint, 1, 0).sum()
if operation == 'median':
rank = filter_size // 2
elif operation == 'percentile':
percentile = rank
if percentile < 0.0:
percentile += 100.0
if percentile < 0 or percentile > 100:
raise RuntimeError('invalid percentile')
if percentile == 100.0:
rank = filter_size - 1
else:
rank = int(float(filter_size) * percentile / 100.0)
if rank < 0:
rank += filter_size
if rank < 0 or rank >= filter_size:
raise RuntimeError('rank not within filter footprint size')
if rank == 0:
return minimum_filter(input, None, footprint, output, mode, cval,
origins)
elif rank == filter_size - 1:
return maximum_filter(input, None, footprint, output, mode, cval,
origins)
else:
output = _ni_support._get_output(output, input)
temp_needed = numpy.may_share_memory(input, output)
if temp_needed:
# input and output arrays cannot share memory
temp = output
output = _ni_support._get_output(output.dtype, input)
if not isinstance(mode, str) and isinstance(mode, Iterable):
raise RuntimeError(
"A sequence of modes is not supported by non-separable rank "
"filters")
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.rank_filter(input, rank, footprint, output, mode, cval,
origins)
if temp_needed:
temp[...] = output
output = temp
return output
@_ni_docstrings.docfiller
def rank_filter(input, rank, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional rank filter.
Parameters
----------
%(input)s
rank : int
        The rank parameter may be less than zero, i.e., rank = -1
indicates the largest element.
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
rank_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.rank_filter(ascent, rank=42, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
rank = operator.index(rank)
return _rank_filter(input, rank, size, footprint, output, mode, cval,
origin, 'rank')
@_ni_docstrings.docfiller
def median_filter(input, size=None, footprint=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Calculate a multidimensional median filter.
Parameters
----------
%(input)s
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
median_filter : ndarray
Filtered array. Has the same shape as `input`.
See also
--------
scipy.signal.medfilt2d
Notes
-----
For 2-dimensional images with ``uint8``, ``float32`` or ``float64`` dtypes
the specialised function `scipy.signal.medfilt2d` may be faster. It is
however limited to constant mode with ``cval=0``.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.median_filter(ascent, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, 0, size, footprint, output, mode, cval,
origin, 'median')
@_ni_docstrings.docfiller
def percentile_filter(input, percentile, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""Calculate a multidimensional percentile filter.
Parameters
----------
%(input)s
percentile : scalar
        The percentile parameter may be less than zero, i.e.,
percentile = -20 equals percentile = 80
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
Returns
-------
percentile_filter : ndarray
Filtered array. Has the same shape as `input`.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.gray() # show the filtered result in grayscale
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.percentile_filter(ascent, percentile=20, size=20)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
"""
return _rank_filter(input, percentile, size, footprint, output, mode,
cval, origin, 'percentile')
@_ni_docstrings.docfiller
def generic_filter1d(input, function, filter_size, axis=-1,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a 1-D filter along the given axis.
`generic_filter1d` iterates over the lines of the array, calling the
    given function at each line. The arguments passed to the function are
    the input line and the output line. The input and output lines are 1-D
double arrays. The input line is extended appropriately according
to the filter size and origin. The output line must be modified
in-place with the result.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply along given axis.
filter_size : scalar
Length of the filter.
%(axis)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int function(double *input_line, npy_intp input_length,
double *output_line, npy_intp output_length,
void *user_data)
int function(double *input_line, intptr_t input_length,
double *output_line, intptr_t output_length,
void *user_data)
The calling function iterates over the lines of the input and output
arrays, calling the callback function at each line. The current line
is extended according to the border conditions set by the calling
function, and the result is copied into the array that is passed
through ``input_line``. The length of the input line (after extension)
is passed through ``input_length``. The callback function should apply
the filter and store the result in the array passed through
``output_line``. The length of the output line is passed through
``output_length``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
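    Examples
    --------
    A minimal sketch using a Python callable; ``running_mean`` is a
    hypothetical helper that averages a window of ``filter_size`` samples:
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> def running_mean(input_line, output_line):
    ...     # input_line is already extended by filter_size - 1 samples
    ...     for i in range(output_line.size):
    ...         output_line[i] = input_line[i:i + 3].mean()
    >>> result = ndimage.generic_filter1d([2, 8, 0, 4, 1, 9, 9, 0], running_mean, 3)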
"""
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if filter_size < 1:
raise RuntimeError('invalid filter size')
axis = normalize_axis_index(axis, input.ndim)
if (filter_size // 2 + origin < 0) or (filter_size // 2 + origin >=
filter_size):
raise ValueError('invalid origin')
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter1d(input, function, filter_size, axis, output,
mode, cval, origin, extra_arguments,
extra_keywords)
return output
@_ni_docstrings.docfiller
def generic_filter(input, function, size=None, footprint=None,
output=None, mode="reflect", cval=0.0, origin=0,
extra_arguments=(), extra_keywords=None):
"""Calculate a multidimensional filter using the given function.
At each element the provided function is called. The input values
within the filter footprint at that element are passed to the function
as a 1-D array of double values.
Parameters
----------
%(input)s
function : {callable, scipy.LowLevelCallable}
Function to apply at each element.
%(size_foot)s
%(output)s
%(mode_reflect)s
%(cval)s
%(origin_multiple)s
%(extra_arguments)s
%(extra_keywords)s
Notes
-----
This function also accepts low-level callback functions with one of
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int callback(double *buffer, npy_intp filter_size,
double *return_value, void *user_data)
int callback(double *buffer, intptr_t filter_size,
double *return_value, void *user_data)
The calling function iterates over the elements of the input and
output arrays, calling the callback function at each element. The
elements within the footprint of the filter at the current element are
passed through the ``buffer`` parameter, and the number of elements
within the footprint through ``filter_size``. The calculated value is
returned in ``return_value``. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
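    Examples
    --------
    A minimal sketch; `numpy.amax` reduces the footprint values at each
    element, so this call behaves like `maximum_filter` with ``size=3``:
    >>> import numpy as np
    >>> from scipy import ndimage, misc
    >>> ascent = misc.ascent()
    >>> result = ndimage.generic_filter(ascent, np.amax, size=3)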
"""
if (size is not None) and (footprint is not None):
warnings.warn("ignoring size because footprint is set", UserWarning, stacklevel=2)
if extra_keywords is None:
extra_keywords = {}
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
origins = _ni_support._normalize_sequence(origin, input.ndim)
if footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _ni_support._normalize_sequence(size, input.ndim)
footprint = numpy.ones(sizes, dtype=bool)
else:
footprint = numpy.asarray(footprint, dtype=bool)
fshape = [ii for ii in footprint.shape if ii > 0]
if len(fshape) != input.ndim:
raise RuntimeError('filter footprint array has incorrect shape.')
for origin, lenf in zip(origins, fshape):
if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
raise ValueError('invalid origin')
if not footprint.flags.contiguous:
footprint = footprint.copy()
output = _ni_support._get_output(output, input)
mode = _ni_support._extend_mode_to_code(mode)
_nd_image.generic_filter(input, function, footprint, output, mode,
cval, origins, extra_arguments, extra_keywords)
return output
| bsd-3-clause |
joernhees/scikit-learn | examples/feature_selection/plot_feature_selection_pipeline.py | 58 | 1049 | """
==================
Pipeline Anova SVM
==================
Simple usage of Pipeline that runs successively a univariate
feature selection with anova and then a C-SVM trained on the selected features.
"""
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
print(__doc__)
# import some data to play with
X, y = samples_generator.make_classification(
n_features=20, n_informative=3, n_redundant=0, n_classes=4,
n_clusters_per_class=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.SVC(kernel='linear')
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X_train, y_train)
y_pred = anova_svm.predict(X_test)
print(classification_report(y_test, y_pred))
| bsd-3-clause |
BennettLandman/pyPheWAS | pyPheWAS/maximize_control_matching.py | 1 | 4799 | import pandas as pd
import operator
import random
import numpy as np
import sys
import getopt
from hopcroftkarp import HopcroftKarp
"""
"""
CATEGORICAL_DATA = '675161f1c87ff2648c61ff1c57c780f2'
def generate_row_query(keys, deltas, tr):
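    # Build a pandas .query() string that keeps rows lying within the per-key
    # deltas of the target row `tr`; categorical keys require an exact match
    # (hypothetical example of the result: 'sex==1&abs(age-42)<5').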
q = []
for i,dt in enumerate(deltas):
key = keys[i]
is_categorical = dt == CATEGORICAL_DATA
if is_categorical:
part = '=='.join([key, tr[key].__repr__()])
else:
structure = ['abs(', key, '-',tr[key],')', '<', dt]
part = ''.join([str(x) for x in structure])
q.append(part)
return '&'.join(q)
def get_options(targets, controls, keys, deltas):
tt = targets[keys]
c = controls[keys]
matching = {}
if len(c) > len(tt):
for i in tt.index:
tr = tt.loc[i]
control_query = generate_row_query(keys, deltas, tr)
matches = c.query(control_query).index
matching[i] = matches.drop_duplicates().tolist()
# matching[i] = set(matches)
else:
for i in c.index:
tr = c.loc[i]
target_query = generate_row_query(keys, deltas, tr)
matches = tt.query(target_query).index
matching[i] = matches.drop_duplicates().tolist()
# matching[i] = set(matches)
return matching
def generate_matches(matching, goal):
# Sort the targets by the number of controls they match
frequency = { k : len(v) for k,v in matching.items() }
frequency = sorted(frequency.items(), key=operator.itemgetter(1))
success = True
# Keep track of the already used controls
used = []
# The final mapping of targets : [control list]
final = {}
for key,_ in frequency:
final[key] = []
viable = matching[key]
random.shuffle(viable)
for control in viable:
if len(final[key]) == goal:
break
if control not in used:
used.append(control)
final[key].append(control)
if len(final[key]) < goal:
success = False
return (final, used, success, goal)
def maximize_matches(matching):
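    # Start from a 1-1 matching; if even that fails, return it. Otherwise keep
    # raising the goal until matching fails and return the last success.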
prev = generate_matches(matching, 1)
    if not prev[2]:
        return prev
# If 1-1 matching was successful, attempt to maximize starting from 2
success = prev[2]
goal = 2
while success:
curr = generate_matches(matching, goal)
success = curr[2]
if success:
prev = curr
goal += 1
return prev
def output_matches(path, outputfile, data, all_used, success, matched):
new_data = data[data.index.isin(all_used)]
if not success:
print("Could not match 1-1, using the maximum number of matches found by the approximation algorithm")
if '%s' in outputfile:
outputfile = outputfile % ('max')
else:
print("Matched data 1-%s" % (matched))
if '%s' in outputfile:
outputfile = outputfile % (matched)
new_data.to_csv(path + outputfile,index=False)
print("Data in %s" % (path + outputfile))
def control_match(path, inputfile, outputfile, keys, deltas, condition='genotype',goal=-1):
# Reformat arguments into Python format
keys = keys.split('+')
deltas = deltas.split(',')
deltas = [CATEGORICAL_DATA if x == '' else int(x) for x in deltas]
# Read data from the provided input file
data = pd.read_csv(path + inputfile)
# Assert that all of the provided keys are present in the data
for key in keys:
assert key in data.columns, '%s not a column in the input file (%s)' % (key, inputfile)
# Assign new value for outputfile
if not outputfile:
outputfile = '1-%s_' + inputfile
# Separate patients and controls
targets = data[data[condition] == 1]
controls = data[data[condition] == 0]
match_by_control = len(targets) > len(controls)
matching = get_options(targets, controls, keys, deltas)
if goal != -1:
final, used, success, matched = generate_matches(matching, goal)
if success:
if match_by_control:
all_used = used + controls.index.tolist()
else:
all_used = used + targets.index.tolist()
output_matches(path, outputfile, data, all_used, success, matched)
# return
else:
print("Failed to perform 1-%s, attempting to maximize..." % (goal))
while not success:
goal = 1
print(deltas)
deltas = [element + 1 if element != CATEGORICAL_DATA else element for element in deltas]
matching = get_options(targets, controls, keys, deltas)
final, used, success, matched = generate_matches(matching, goal)
print("Used %s as delta values across keys. Generated a 1-%s match." % (deltas, goal))
final, used, success, matched = maximize_matches(matching)
if match_by_control:
all_used = used + controls.index.tolist()
else:
all_used = used + targets.index.tolist()
output_matches(path, outputfile, data, all_used, success, matched)
if goal==-1:
final, used, success, matched = maximize_matches(matching)
#all_used = used + targets.index.tolist()
if match_by_control:
all_used = used + controls.index.tolist()
else:
all_used = used + targets.index.tolist()
output_matches(path, outputfile, data, all_used, success, matched)
| mit |
plotly/python-api | packages/python/plotly/plotly/tests/test_core/test_px/test_px_functions.py | 1 | 11286 | import plotly.express as px
import plotly.graph_objects as go
from numpy.testing import assert_array_equal
import numpy as np
import pandas as pd
import pytest
def _compare_figures(go_trace, px_fig):
"""Compare a figure created with a go trace and a figure created with
a px function call. Check that all values inside the go Figure are the
same in the px figure (which sets more parameters).
"""
go_fig = go.Figure(go_trace)
go_fig = go_fig.to_plotly_json()
px_fig = px_fig.to_plotly_json()
del go_fig["layout"]["template"]
del px_fig["layout"]["template"]
for key in go_fig["data"][0]:
assert_array_equal(go_fig["data"][0][key], px_fig["data"][0][key])
for key in go_fig["layout"]:
assert go_fig["layout"][key] == px_fig["layout"][key]
def test_pie_like_px():
# Pie
labels = ["Oxygen", "Hydrogen", "Carbon_Dioxide", "Nitrogen"]
values = [4500, 2500, 1053, 500]
fig = px.pie(names=labels, values=values)
trace = go.Pie(labels=labels, values=values)
_compare_figures(trace, fig)
labels = ["Eve", "Cain", "Seth", "Enos", "Noam", "Abel", "Awan", "Enoch", "Azura"]
parents = ["", "Eve", "Eve", "Seth", "Seth", "Eve", "Eve", "Awan", "Eve"]
values = [10, 14, 12, 10, 2, 6, 6, 4, 4]
# Sunburst
fig = px.sunburst(names=labels, parents=parents, values=values)
trace = go.Sunburst(labels=labels, parents=parents, values=values)
_compare_figures(trace, fig)
# Treemap
fig = px.treemap(names=labels, parents=parents, values=values)
trace = go.Treemap(labels=labels, parents=parents, values=values)
_compare_figures(trace, fig)
# Funnel
x = ["A", "B", "C"]
y = [3, 2, 1]
fig = px.funnel(y=y, x=x)
trace = go.Funnel(y=y, x=x)
_compare_figures(trace, fig)
# Funnelarea
fig = px.funnel_area(values=y, names=x)
trace = go.Funnelarea(values=y, labels=x)
_compare_figures(trace, fig)
def test_sunburst_treemap_colorscales():
labels = ["Eve", "Cain", "Seth", "Enos", "Noam", "Abel", "Awan", "Enoch", "Azura"]
parents = ["", "Eve", "Eve", "Seth", "Seth", "Eve", "Eve", "Awan", "Eve"]
values = [10, 14, 12, 10, 2, 6, 6, 4, 4]
for func, colorway in zip(
[px.sunburst, px.treemap], ["sunburstcolorway", "treemapcolorway"]
):
# Continuous colorscale
fig = func(
names=labels,
parents=parents,
values=values,
color=values,
color_continuous_scale="Viridis",
range_color=(5, 15),
)
assert fig.layout.coloraxis.cmin, fig.layout.coloraxis.cmax == (5, 15)
# Discrete colorscale, color arg passed
color_seq = px.colors.sequential.Reds
fig = func(
names=labels,
parents=parents,
values=values,
color=labels,
color_discrete_sequence=color_seq,
)
assert np.all([col in color_seq for col in fig.data[0].marker.colors])
# Numerical color arg passed, fall back to continuous
fig = func(names=labels, parents=parents, values=values, color=values,)
assert [
el[0] == px.colors.sequential.Viridis
for i, el in enumerate(fig.layout.coloraxis.colorscale)
]
# Numerical color arg passed, continuous colorscale
# even if color_discrete_sequence if passed
fig = func(
names=labels,
parents=parents,
values=values,
color=values,
color_discrete_sequence=color_seq,
)
assert [
el[0] == px.colors.sequential.Viridis
for i, el in enumerate(fig.layout.coloraxis.colorscale)
]
# Discrete colorscale, no color arg passed
color_seq = px.colors.sequential.Reds
fig = func(
names=labels,
parents=parents,
values=values,
color_discrete_sequence=color_seq,
)
assert list(fig.layout[colorway]) == color_seq
def test_sunburst_treemap_with_path():
vendors = ["A", "B", "C", "D", "E", "F", "G", "H"]
sectors = [
"Tech",
"Tech",
"Finance",
"Finance",
"Tech",
"Tech",
"Finance",
"Finance",
]
regions = ["North", "North", "North", "North", "South", "South", "South", "South"]
values = [1, 3, 2, 4, 2, 2, 1, 4]
total = ["total",] * 8
df = pd.DataFrame(
dict(
vendors=vendors,
sectors=sectors,
regions=regions,
values=values,
total=total,
)
)
path = ["total", "regions", "sectors", "vendors"]
# No values
fig = px.sunburst(df, path=path)
assert fig.data[0].branchvalues == "total"
# Values passed
fig = px.sunburst(df, path=path, values="values")
assert fig.data[0].branchvalues == "total"
assert fig.data[0].values[-1] == np.sum(values)
# Values passed
fig = px.sunburst(df, path=path, values="values")
assert fig.data[0].branchvalues == "total"
assert fig.data[0].values[-1] == np.sum(values)
# Continuous colorscale
fig = px.sunburst(df, path=path, values="values", color="values")
assert "coloraxis" in fig.data[0].marker
assert np.all(np.array(fig.data[0].marker.colors) == np.array(fig.data[0].values))
# Error when values cannot be converted to numerical data type
df["values"] = ["1 000", "3 000", "2", "4", "2", "2", "1 000", "4 000"]
msg = "Column `values` of `df` could not be converted to a numerical data type."
with pytest.raises(ValueError, match=msg):
fig = px.sunburst(df, path=path, values="values")
# path is a mixture of column names and array-like
path = [df.total, "regions", df.sectors, "vendors"]
fig = px.sunburst(df, path=path)
assert fig.data[0].branchvalues == "total"
def test_sunburst_treemap_with_path_and_hover():
df = px.data.tips()
fig = px.sunburst(
df, path=["sex", "day", "time", "smoker"], color="smoker", hover_data=["smoker"]
)
assert "smoker" in fig.data[0].hovertemplate
def test_sunburst_treemap_with_path_color():
vendors = ["A", "B", "C", "D", "E", "F", "G", "H"]
sectors = [
"Tech",
"Tech",
"Finance",
"Finance",
"Tech",
"Tech",
"Finance",
"Finance",
]
regions = ["North", "North", "North", "North", "South", "South", "South", "South"]
values = [1, 3, 2, 4, 2, 2, 1, 4]
calls = [8, 2, 1, 3, 2, 2, 4, 1]
total = ["total",] * 8
df = pd.DataFrame(
dict(
vendors=vendors,
sectors=sectors,
regions=regions,
values=values,
total=total,
calls=calls,
)
)
path = ["total", "regions", "sectors", "vendors"]
fig = px.sunburst(df, path=path, values="values", color="calls")
colors = fig.data[0].marker.colors
assert np.all(np.array(colors[:8]) == np.array(calls))
fig = px.sunburst(df, path=path, color="calls")
colors = fig.data[0].marker.colors
assert np.all(np.array(colors[:8]) == np.array(calls))
# Hover info
df["hover"] = [el.lower() for el in vendors]
fig = px.sunburst(df, path=path, color="calls", hover_data=["hover"])
custom = fig.data[0].customdata
assert np.all(custom[:8, 0] == df["hover"])
assert np.all(custom[8:, 0] == "(?)")
assert np.all(custom[:8, 1] == df["calls"])
# Discrete color
fig = px.sunburst(df, path=path, color="vendors")
assert len(np.unique(fig.data[0].marker.colors)) == 9
# Discrete color and color_discrete_map
cmap = {"Tech": "yellow", "Finance": "magenta", "(?)": "black"}
fig = px.sunburst(df, path=path, color="sectors", color_discrete_map=cmap)
assert np.all(np.in1d(fig.data[0].marker.colors, list(cmap.values())))
# Numerical column in path
df["regions"] = df["regions"].map({"North": 1, "South": 2})
path = ["total", "regions", "sectors", "vendors"]
fig = px.sunburst(df, path=path, values="values", color="calls")
colors = fig.data[0].marker.colors
assert np.all(np.array(colors[:8]) == np.array(calls))
def test_sunburst_treemap_with_path_non_rectangular():
vendors = ["A", "B", "C", "D", None, "E", "F", "G", "H", None]
sectors = [
"Tech",
"Tech",
"Finance",
"Finance",
None,
"Tech",
"Tech",
"Finance",
"Finance",
"Finance",
]
regions = [
"North",
"North",
"North",
"North",
"North",
"South",
"South",
"South",
"South",
"South",
]
values = [1, 3, 2, 4, 1, 2, 2, 1, 4, 1]
total = ["total",] * 10
df = pd.DataFrame(
dict(
vendors=vendors,
sectors=sectors,
regions=regions,
values=values,
total=total,
)
)
path = ["total", "regions", "sectors", "vendors"]
msg = "Non-leaves rows are not permitted in the dataframe"
with pytest.raises(ValueError, match=msg):
fig = px.sunburst(df, path=path, values="values")
df.loc[df["vendors"].isnull(), "sectors"] = "Other"
fig = px.sunburst(df, path=path, values="values")
assert fig.data[0].values[-1] == np.sum(values)
def test_pie_funnelarea_colorscale():
labels = ["A", "B", "C", "D"]
values = [3, 2, 1, 4]
for func, colorway in zip(
[px.sunburst, px.treemap], ["sunburstcolorway", "treemapcolorway"]
):
# Discrete colorscale, no color arg passed
color_seq = px.colors.sequential.Reds
fig = func(names=labels, values=values, color_discrete_sequence=color_seq,)
assert list(fig.layout[colorway]) == color_seq
# Discrete colorscale, color arg passed
color_seq = px.colors.sequential.Reds
fig = func(
names=labels,
values=values,
color=labels,
color_discrete_sequence=color_seq,
)
assert np.all([col in color_seq for col in fig.data[0].marker.colors])
def test_funnel():
fig = px.funnel(
x=[5, 4, 3, 3, 2, 1],
y=["A", "B", "C", "A", "B", "C"],
color=["0", "0", "0", "1", "1", "1"],
)
assert len(fig.data) == 2
def test_parcats_dimensions_max():
df = px.data.tips()
# default behaviour
fig = px.parallel_categories(df)
assert [d.label for d in fig.data[0].dimensions] == [
"sex",
"smoker",
"day",
"time",
"size",
]
# explicit subset of default
fig = px.parallel_categories(df, dimensions=["sex", "smoker", "day"])
assert [d.label for d in fig.data[0].dimensions] == ["sex", "smoker", "day"]
# shrinking max
fig = px.parallel_categories(df, dimensions_max_cardinality=4)
assert [d.label for d in fig.data[0].dimensions] == [
"sex",
"smoker",
"day",
"time",
]
# explicit superset of default, violating the max
fig = px.parallel_categories(
df, dimensions=["sex", "smoker", "day", "size"], dimensions_max_cardinality=4
)
assert [d.label for d in fig.data[0].dimensions] == ["sex", "smoker", "day", "size"]
| mit |
diegocavalca/Studies | phd-thesis/nilmtk/nilmtk/dataset_converters/refit/convert_refit.py | 1 | 4266 | '''
REFIT dataset converter for the clean version avaiable at the URLs below:
"REFIT: Electrical Load Measurements (Cleaned)"
https://pure.strath.ac.uk/portal/en/datasets/refit-electrical-load-measurements-cleaned(9ab14b0e-19ac-4279-938f-27f643078cec).html
https://pure.strath.ac.uk/portal/files/52873459/Processed_Data_CSV.7z
The original version of the dataset include duplicated timestamps.
Check the dataset website for more information.
For citation of the dataset, use:
http://dx.doi.org/10.1038/sdata.2016.122
'''
from __future__ import print_function, division
import pandas as pd
import numpy as np
from copy import deepcopy
from os.path import join, isdir, isfile
from os import listdir
import fnmatch
import re
from sys import stdout
from nilmtk.utils import get_datastore
from nilmtk.datastore import Key
from nilmtk.timeframe import TimeFrame
from nilmtk.measurement import LEVEL_NAMES
from nilmtk.utils import get_module_directory, check_directory_exists
from nilm_metadata import convert_yaml_to_hdf5, save_yaml_to_datastore
def convert_refit(input_path, output_filename, format='HDF'):
"""
Parameters
----------
input_path : str
The root path of the CSV files, e.g. House1.csv
output_filename : str
The destination filename (including path and suffix).
format : str
format of output. Either 'HDF' or 'CSV'. Defaults to 'HDF'
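    Examples
    --------
    Illustrative only; the paths below are hypothetical.
    >>> convert_refit('/data/REFIT/Processed_Data_CSV/', '/data/refit.h5')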
"""
# Open DataStore
store = get_datastore(output_filename, format, mode='w')
# Convert raw data to DataStore
_convert(input_path, store, 'Europe/London')
# Add metadata
save_yaml_to_datastore(join(get_module_directory(),
'dataset_converters',
'refit',
'metadata'),
store)
store.close()
print("Done converting REFIT to HDF5!")
def _convert(input_path, store, tz, sort_index=True):
"""
Parameters
----------
input_path : str
The root path of the REFIT dataset.
store : DataStore
The NILMTK DataStore object.
tz : str
Timezone e.g. 'US/Eastern'
sort_index : bool
"""
check_directory_exists(input_path)
# Iterate though all houses and channels
# house 14 is missing!
houses = [1,2,3,4,5,6,7,8,9,10,11,12,13,15,16,17,18,19,20,21]
nilmtk_house_id = 0
for house_id in houses:
nilmtk_house_id += 1
print("Loading house", house_id, end="... ")
stdout.flush()
csv_filename = join(input_path, 'House_' + str(house_id) + '.csv')
# The clean version already includes header, so we
# just skip the text version of the timestamp
usecols = ['Unix','Aggregate','Appliance1','Appliance2','Appliance3','Appliance4','Appliance5','Appliance6','Appliance7','Appliance8','Appliance9']
df = _load_csv(csv_filename, usecols, tz)
if sort_index:
df = df.sort_index() # might not be sorted...
chan_id = 0
for col in df.columns:
chan_id += 1
print(chan_id, end=" ")
stdout.flush()
key = Key(building=nilmtk_house_id, meter=chan_id)
chan_df = pd.DataFrame(df[col])
chan_df.columns = pd.MultiIndex.from_tuples([('power', 'active')])
# Modify the column labels to reflect the power measurements recorded.
chan_df.columns.set_names(LEVEL_NAMES, inplace=True)
store.put(str(key), chan_df)
print('')
def _load_csv(filename, usecols, tz):
"""
Parameters
----------
filename : str
usecols : list of columns to keep
tz : str e.g. 'US/Eastern'
Returns
-------
dataframe
"""
# Load data
df = pd.read_csv(filename, usecols=usecols)
# Convert the integer index column to timezone-aware datetime
df['Unix'] = pd.to_datetime(df['Unix'], unit='s', utc=True)
df.set_index('Unix', inplace=True)
df = df.tz_convert(tz)
return df
| cc0-1.0 |
anhquan0412/deeplearning_fastai | deeplearning2/utils2.py | 1 | 4651 | import math, keras, datetime, pandas as pd, numpy as np, keras.backend as K, threading, json, re, collections
import tarfile, tensorflow as tf, matplotlib.pyplot as plt, xgboost, operator, random, pickle, glob, os, bcolz
import shutil, sklearn, functools, itertools, scipy
from PIL import Image
from concurrent.futures import ProcessPoolExecutor, as_completed, ThreadPoolExecutor
import matplotlib.patheffects as PathEffects
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.neighbors import NearestNeighbors, LSHForest
import IPython
from IPython.display import display, Audio
from numpy.random import normal
from gensim.models import word2vec
from keras.preprocessing.text import Tokenizer
from nltk.tokenize import ToktokTokenizer, StanfordTokenizer
from functools import reduce
from itertools import chain
from tensorflow.python.framework import ops
#from tensorflow.contrib import rnn, legacy_seq2seq as seq2seq
from keras_tqdm import TQDMNotebookCallback
#from keras import initializations # Keras 1
from keras.applications.resnet50 import ResNet50, decode_predictions, conv_block, identity_block
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model, Sequential
from keras.layers import *
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import decode_predictions, preprocess_input
np.set_printoptions(threshold=50, edgeitems=20)
def beep(): return Audio(filename='/home/jhoward/beep.mp3', autoplay=True)
def dump(obj, fname): pickle.dump(obj, open(fname, 'wb'))
def load(fname): return pickle.load(open(fname, 'rb'))
def limit_mem():
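    # Close the current Keras/TF session and replace it with one that grows
    # GPU memory on demand (allow_growth) instead of reserving it all upfront.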
K.get_session().close()
cfg = K.tf.ConfigProto()
cfg.gpu_options.allow_growth = True
K.set_session(K.tf.Session(config=cfg))
def autolabel(plt, fmt='%.2f'):
rects = plt.patches
ax = rects[0].axes
y_bottom, y_top = ax.get_ylim()
y_height = y_top - y_bottom
for rect in rects:
height = rect.get_height()
if height / y_height > 0.95:
label_position = height - (y_height * 0.06)
else:
label_position = height + (y_height * 0.01)
txt = ax.text(rect.get_x() + rect.get_width()/2., label_position,
fmt % height, ha='center', va='bottom')
txt.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='w')])
def column_chart(lbls, vals, val_lbls='%.2f'):
n = len(lbls)
p = plt.bar(np.arange(n), vals)
plt.xticks(np.arange(n), lbls)
if val_lbls: autolabel(p, val_lbls)
def save_array(fname, arr):
c=bcolz.carray(arr, rootdir=fname, mode='w')
c.flush()
def load_array(fname): return bcolz.open(fname)[:]
def load_glove(loc):
return (load_array(loc+'.dat'),
pickle.load(open(loc+'_words.pkl','rb'), encoding='latin1'),
pickle.load(open(loc+'_idx.pkl','rb'), encoding='latin1'))
def plot_multi(im, dim=(4,4), figsize=(6,6), **kwargs ):
plt.figure(figsize=figsize)
for i,img in enumerate(im):
plt.subplot(*dim, i+1)
plt.imshow(img, **kwargs)
plt.axis('off')
plt.tight_layout()
def plot_train(hist):
h = hist.history
if 'acc' in h:
meas='acc'
loc='lower right'
else:
meas='loss'
loc='upper right'
plt.plot(hist.history[meas])
plt.plot(hist.history['val_'+meas])
plt.title('model '+meas)
plt.ylabel(meas)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc=loc)
def fit_gen(gen, fn, eval_fn, nb_iter):
for i in range(nb_iter):
fn(*next(gen))
if i % (nb_iter//10) == 0: eval_fn()
def wrap_config(layer):
return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
def copy_layer(layer): return layer_from_config(wrap_config(layer))
def copy_layers(layers): return [copy_layer(layer) for layer in layers]
def copy_weights(from_layers, to_layers):
for from_layer,to_layer in zip(from_layers, to_layers):
to_layer.set_weights(from_layer.get_weights())
def copy_model(m):
res = Sequential(copy_layers(m.layers))
copy_weights(m.layers, res.layers)
return res
def insert_layer(model, new_layer, index):
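    # Return a new Sequential equal to `model` but with `new_layer` inserted
    # at position `index`; weights of the original layers are copied over.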
res = Sequential()
for i,layer in enumerate(model.layers):
if i==index: res.add(new_layer)
copied = layer_from_config(wrap_config(layer))
res.add(copied)
copied.set_weights(layer.get_weights())
return res
| apache-2.0 |
BavoGoosens/Gaiter | feature_extraction/frequency_domain_feature_extractor.py | 1 | 7795 | from feature_extractor import *
from data_utils.featured_frame import *
from matplotlib.mlab import entropy as en
import scipy.fftpack as ff
import numpy as np
class FrequencyDomainFeatureExtractor(FeatureExtractor):
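    # Extracts frequency-domain features from a frame: FFT coefficients plus
    # spectral mean, energy, variance, standard deviation, DC component and
    # entropy, optionally computed on the frame's derivatives as well.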
def __init__(self, derivative=True):
super(FrequencyDomainFeatureExtractor, self).__init__(derivative)
def extract_features(self, frame):
if not isinstance(frame, FeaturedFrame):
frame = FeaturedFrame(frame)
# add coefficients
self.add_spectral_coefficients(frame)
if self.derivative:
# add coefficients derivative
self.add_spectral_coefficients(frame, True)
# add features
self.add_spectral_mean(frame)
self.add_spectral_energy(frame)
self.add_spectral_variance(frame)
self.add_spectral_std(frame)
self.add_dc_component(frame)
self.add_spectral_entropy(frame)
if self.derivative:
# add features derivative
self.add_spectral_mean(frame, True)
self.add_spectral_energy(frame, True)
self.add_spectral_variance(frame, True)
self.add_spectral_std(frame, True)
self.add_spectral_entropy(frame, True)
return frame
# ADD COEFFICIENTS
def add_spectral_coefficients(self, frame, derivative=False):
der = ''
if derivative:
x_axis = frame.get_derivative('x')
y_axis = frame.get_derivative('y')
z_axis = frame.get_derivative('z')
der = '_der'
else:
x_axis = frame.get_x_data()
y_axis = frame.get_y_data()
z_axis = frame.get_z_data()
frame.add_coefficients('x_spectral_cos' + der, self.calculate_spectral_coefficients(x_axis))
frame.add_coefficients('y_spectral_cos' + der, self.calculate_spectral_coefficients(y_axis))
frame.add_coefficients('z_spectral_cos' + der, self.calculate_spectral_coefficients(z_axis))
def add_mel_scale_coefficients(self, frame):
x_axis = frame.get_x_data()
y_axis = frame.get_y_data()
z_axis = frame.get_z_data()
if self.has_coefficients(frame, 'spectral'):
frame.add_coefficients('x_MFCC', self.calculate_mel_scale_coefficients(frame, x_axis))
frame.add_coefficients('y_MFCC', self.calculate_mel_scale_coefficients(frame, y_axis))
frame.add_coefficients('z_MFCC', self.calculate_mel_scale_coefficients(frame, z_axis))
else:
self.add_spectral_coefficients(frame)
def calculate_spectral_coefficients(self, data):
return ff.fft(data)
def calculate_mel_scale_coefficients(self, frame, data):
        # Mel-frequency cepstral coefficients of the given axis data:
        # DCT of the log of the mel-filtered power spectrum.
        complex_spectrum = self.calculate_spectral_coefficients(data)
        power_spectrum = abs(complex_spectrum) ** 2
        filtered_spectrum = np.dot(power_spectrum, self.melFilterBank(256))
        log_spectrum = np.log(filtered_spectrum)
        dct_spectrum = ff.dct(log_spectrum, type=2)
        return dct_spectrum
# ADD FEATURES
def add_spectral_mean(self, frame, derivative=False):
der = ''
if derivative:
der = '_der'
frame.add_feature('x_spectral_mean' + der, np.mean(frame.get_coefficients('x_spectral_cos' + der)))
frame.add_feature('y_spectral_mean' + der, np.mean(frame.get_coefficients('y_spectral_cos' + der)))
frame.add_feature('z_spectral_mean' + der, np.mean(frame.get_coefficients('z_spectral_cos' + der)))
def add_spectral_energy(self, frame, derivative=False):
der = ''
if derivative:
der = '_der'
frame.add_feature('x_spectral_energy' + der,
np.mean(np.power(frame.get_coefficients('x_spectral_cos' + der), 2)))
frame.add_feature('y_spectral_energy' + der,
np.mean(np.power(frame.get_coefficients('y_spectral_cos' + der), 2)))
frame.add_feature('z_spectral_energy' + der,
np.mean(np.power(frame.get_coefficients('z_spectral_cos' + der), 2)))
def add_spectral_variance(self, frame, derivative=False):
der = ''
if derivative:
der = '_der'
frame.add_feature('x_spectral_variance' + der, np.var(frame.get_coefficients('x_spectral_cos' + der)))
frame.add_feature('y_spectral_variance' + der, np.var(frame.get_coefficients('y_spectral_cos' + der)))
frame.add_feature('z_spectral_variance' + der, np.var(frame.get_coefficients('z_spectral_cos' + der)))
def add_spectral_std(self, frame, derivative=False):
der = ''
if derivative:
der = '_der'
frame.add_feature('x_spectral_std' + der, np.std(frame.get_coefficients('x_spectral_cos' + der)))
frame.add_feature('y_spectral_std' + der, np.std(frame.get_coefficients('y_spectral_cos' + der)))
frame.add_feature('z_spectral_std' + der, np.std(frame.get_coefficients('z_spectral_cos' + der)))
def add_dc_component(self, frame):
x_axis, y_axis, z_axis = frame.get_x_data(), frame.get_y_data(), frame.get_z_data()
'''x_spectral, y_spectral, z_spectral = self.calculate_spectral_coefficients(x_axis), self.calculate_spectral_coefficients(y_axis), self.calculate_spectral_coefficients(z_axis)
x_dc = 0 if len(x_spectral) == 0 else x_spectral[0]
y_dc = 0 if len(y_spectral) == 0 else y_spectral[0]
z_dc = 0 if len(z_spectral) == 0 else z_spectral[0]'''
x_dc = np.mean(x_axis)
y_dc = np.mean(y_axis)
z_dc = np.mean(z_axis)
frame.add_feature('x_dc', x_dc)
frame.add_feature('y_dc', y_dc)
frame.add_feature('z_dc', z_dc)
def add_spectral_entropy(self, frame, derivative=False):
der = ''
if derivative:
der = '_der'
x = frame.get_coefficients('x_spectral_cos' + der)
y = frame.get_coefficients('y_spectral_cos' + der)
z = frame.get_coefficients('z_spectral_cos' + der)
_, binsx = np.histogram(x)
e1 = en(x, binsx)
frame.add_feature('x_spectral_entropy' + der, e1)
_, binsy = np.histogram(y)
e2 = en(y, binsy)
frame.add_feature('y_spectral_entropy' + der, e2)
_, binsz = np.histogram(z)
e3 = en(z, binsz)
frame.add_feature('z_spectral_entropy' + der, e3)
def melFilterBank(self, blockSize):
numCoefficients = 13 # choose the size of mfcc array
minHz = 0
maxHz = 50
numBands = int(numCoefficients)
maxMel = int(self.freqToMel(maxHz))
minMel = int(self.freqToMel(minHz))
# Create a matrix for triangular filters, one row per filter
filterMatrix = np.zeros((numBands, blockSize))
melRange = np.array(xrange(numBands + 2))
melCenterFilters = melRange * (maxMel - minMel) / (numBands + 1) + minMel
# each array index represent the center of each triangular filter
aux = np.log(1 + 1000.0 / 700.0) / 1000.0
aux = (np.exp(melCenterFilters * aux) - 1) / 22050
aux = 0.5 + 700 * blockSize * aux
aux = np.floor(aux) # round down
centerIndex = np.array(aux, int) # Get int values
for i in xrange(numBands):
start, centre, end = centerIndex[i:i + 3]
k1 = np.float32(centre - start)
k2 = np.float32(end - centre)
up = (np.array(xrange(start, centre)) - start) / k1
down = (end - np.array(xrange(centre, end))) / k2
filterMatrix[i][start:centre] = up
filterMatrix[i][centre:end] = down
return filterMatrix.transpose()
def freqToMel(self, freq):
return 1127.01048 * np.math.log(1 + freq / 700.0)
def melToFreq(self, mel):
return 700 * (np.math.exp(mel / 1127.01048) - 1)
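# Editor's sanity check (illustrative only): with the corrected inverse above,
# the two helpers round-trip, e.g. melToFreq(freqToMel(440.0)) ~= 440.0, and
# freqToMel(1000.0) ~= 1000 mel by construction of the 1127.01048 constant.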
| mit |
JeanKossaifi/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break down when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested-cross validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
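##############################################################################
# Editor's addition (sketch): the docstring's remark about nested
# cross-validation can be made concrete by wrapping the CV-tuned estimator in
# an outer CV loop, so the alpha chosen on each outer training split is scored
# on held-out data. In newer scikit-learn releases this import lives in
# sklearn.model_selection rather than sklearn.cross_validation.
from sklearn.cross_validation import cross_val_score
nested_scores = cross_val_score(LassoCV(cv=20), X, y, cv=5)
print("Nested CV R^2: %0.3f (+/- %0.3f)" % (nested_scores.mean(), nested_scores.std() * 2))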
| bsd-3-clause |
tomlof/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
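# Editor's addition (sketch): the soft vote is just the weighted arithmetic
# mean of the individual predict_proba outputs; this reproduces the
# VotingClassifier column for the first sample by hand using weights [1, 1, 5].
manual_avg = (1 * probas[0][0] + 1 * probas[1][0] + 5 * probas[2][0]) / 7.0
print('manual weighted average:', manual_avg)
print('VotingClassifier output:', probas[3][0])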
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/groupby/test_index_as_string.py | 4 | 2023 | import pytest
import pandas as pd
import numpy as np
from pandas.util.testing import assert_frame_equal, assert_series_equal
@pytest.fixture(params=[['inner'], ['inner', 'outer']])
def frame(request):
levels = request.param
df = pd.DataFrame({'outer': ['a', 'a', 'a', 'b', 'b', 'b'],
'inner': [1, 2, 3, 1, 2, 3],
'A': np.arange(6),
'B': ['one', 'one', 'two', 'two', 'one', 'one']})
if levels:
df = df.set_index(levels)
return df
@pytest.fixture()
def series():
df = pd.DataFrame({'outer': ['a', 'a', 'a', 'b', 'b', 'b'],
'inner': [1, 2, 3, 1, 2, 3],
'A': np.arange(6),
'B': ['one', 'one', 'two', 'two', 'one', 'one']})
s = df.set_index(['outer', 'inner', 'B'])['A']
return s
@pytest.mark.parametrize('key_strs,groupers', [
('inner', # Index name
pd.Grouper(level='inner')
),
(['inner'], # List of index name
[pd.Grouper(level='inner')]
),
(['B', 'inner'], # Column and index
['B', pd.Grouper(level='inner')]
),
(['inner', 'B'], # Index and column
[pd.Grouper(level='inner'), 'B'])])
def test_grouper_index_level_as_string(frame, key_strs, groupers):
result = frame.groupby(key_strs).mean()
expected = frame.groupby(groupers).mean()
assert_frame_equal(result, expected)
@pytest.mark.parametrize('levels', [
'inner', 'outer', 'B',
['inner'], ['outer'], ['B'],
['inner', 'outer'], ['outer', 'inner'],
['inner', 'outer', 'B'], ['B', 'outer', 'inner']
])
def test_grouper_index_level_as_string_series(series, levels):
# Compute expected result
if isinstance(levels, list):
groupers = [pd.Grouper(level=lv) for lv in levels]
else:
groupers = pd.Grouper(level=levels)
expected = series.groupby(groupers).mean()
# Compute and check result
result = series.groupby(levels).mean()
assert_series_equal(result, expected)
| bsd-3-clause |
carolinux/QGIS | python/plugins/processing/algs/qgis/PolarPlot.py | 19 | 3223 | # -*- coding: utf-8 -*-
"""
***************************************************************************
PolarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from matplotlib.pyplot import figure
import numpy as np
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class PolarPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
VALUE_FIELD = 'VALUE_FIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Polar plot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterTable(self.INPUT,
self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'), self.INPUT))
self.addParameter(ParameterTableField(self.VALUE_FIELD,
self.tr('Value field'), self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Polar plot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
valuefieldname = self.getParameterValue(self.VALUE_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, valuefieldname)
plt.close()
fig = figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
N = len(values[valuefieldname])
theta = np.arange(0.0, 2 * np.pi, 2 * np.pi / N)
radii = values[valuefieldname]
width = 2 * np.pi / N
ax.bar(theta, radii, width=width, bottom=0.0)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<html><img src="' + plotFilename + '"/></html>')
f.close()
| gpl-2.0 |
ChanderG/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, then because the
number of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
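# Editor's addition (sketch): a quantitative companion to the figures above --
# the Frobenius distance of each precision estimate from the true precision
# (the 'True' entry is a zero baseline).
frob_errors = [(name, np.linalg.norm(this_prec - prec)) for name, this_prec in precs]
print('Frobenius distance from true precision:', frob_errors)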
| bsd-3-clause |
DonBeo/statsmodels | statsmodels/sandbox/examples/ex_mixed_lls_0.py | 34 | 5233 | # -*- coding: utf-8 -*-
"""Example using OneWayMixed
Created on Sat Dec 03 10:15:55 2011
Author: Josef Perktold
This example constructs a linear model with individual specific random
effects and random coefficients, and uses OneWayMixed to estimate it.
"""
from __future__ import print_function
import numpy as np
from statsmodels.sandbox.panel.mixed import OneWayMixed, Unit
examples = ['ex1']
if 'ex1' in examples:
#np.random.seed(54321)
np.random.seed(978326)
nsubj = 2000
units = []
nobs_i = 4 #number of observations per unit, changed below
nx = 4 #number fixed effects
nz = 2 ##number random effects
beta = np.ones(nx)
gamma = 0.5 * np.ones(nz) #mean of random effect
gamma[0] = 0
gamma_re_true = []
for i in range(nsubj):
#create data for one unit
#random effect/coefficient
gamma_re = gamma + 0.2 * np.random.standard_normal(nz)
#store true parameter for checking
gamma_re_true.append(gamma_re)
#for testing unbalanced case, let's change nobs per unit
if i > nsubj//4:
nobs_i = 6
#generate exogenous variables
X = np.random.standard_normal((nobs_i, nx))
Z = np.random.standard_normal((nobs_i, nz-1))
Z = np.column_stack((np.ones(nobs_i), Z))
noise = 0.1 * np.random.randn(nobs_i) #sig_e = 0.1
#generate endogenous variable
Y = np.dot(X, beta) + np.dot(Z, gamma_re) + noise
#add random effect design matrix also to fixed effects to
#capture the mean
#this seems to be necessary to force mean of RE to zero !?
#(It's not required for estimation but interpretation of random
#effects covariance matrix changes - still need to check details.
X = np.hstack((X,Z))
#create units and append to list
unit = Unit(Y, X, Z)
units.append(unit)
m = OneWayMixed(units)
import time
t0 = time.time()
m.initialize()
res = m.fit(maxiter=100, rtol=1.0e-5, params_rtol=1e-6, params_atol=1e-6)
t1 = time.time()
print('time for initialize and fit', t1-t0)
print('number of iterations', m.iterations)
#print(dir(m)
#print(vars(m)
print('\nestimates for fixed effects')
print(m.a)
print(m.params)
bfixed_cov = m.cov_fixed()
print('beta fixed standard errors')
print(np.sqrt(np.diag(bfixed_cov)))
print(m.bse)
b_re = m.params_random_units
print('RE mean:', b_re.mean(0))
print('RE columns std', b_re.std(0))
print('np.cov(b_re, rowvar=0), sample statistic')
print(np.cov(b_re, rowvar=0))
print('std of above')
print(np.sqrt(np.diag(np.cov(b_re, rowvar=0))))
print('m.cov_random()')
print(m.cov_random())
print('std of above')
print(res.std_random())
print(np.sqrt(np.diag(m.cov_random())))
print('\n(non)convergence of llf')
print(m.history['llf'][-4:])
print('convergence of parameters')
#print(np.diff(np.vstack(m.history[-4:])[:,1:],axis=0)
print(np.diff(np.vstack(m.history['params'][-4:]),axis=0))
print('convergence of D')
print(np.diff(np.array(m.history['D'][-4:]), axis=0))
#zdotb = np.array([np.dot(unit.Z, unit.b) for unit in m.units])
zb = np.array([(unit.Z * unit.b[None,:]).sum(0) for unit in m.units])
'''if Z is not included in X:
>>> np.dot(b_re.T, b_re)/100
array([[ 0.03270611, -0.00916051],
[-0.00916051, 0.26432783]])
>>> m.cov_random()
array([[ 0.0348722 , -0.00909159],
[-0.00909159, 0.26846254]])
>>> #note cov_random doesn't subtract mean!
'''
print('\nchecking the random effects distribution and prediction')
gamma_re_true = np.array(gamma_re_true)
print('mean of random effect true', gamma_re_true.mean(0))
print('mean from fixed effects ', m.params[-2:])
print('mean of estimated RE ', b_re.mean(0))
print('')
absmean_true = np.abs(gamma_re_true).mean(0)
mape = ((m.params[-2:] + b_re) / gamma_re_true - 1).mean(0)*100
mean_abs_perc = np.abs((m.params[-2:] + b_re) - gamma_re_true).mean(0) \
/ absmean_true*100
median_abs_perc = np.median(np.abs((m.params[-2:] + b_re) - gamma_re_true), 0) \
/ absmean_true*100
rmse_perc = ((m.params[-2:] + b_re) - gamma_re_true).std(0) \
/ absmean_true*100
print('mape ', mape)
print('mean_abs_perc ', mean_abs_perc)
print('median_abs_perc', median_abs_perc)
print('rmse_perc (std)', rmse_perc)
from numpy.testing import assert_almost_equal
#assert is for n_units=100 in original example
#I changed random number generation, so this won't work anymore
#assert_almost_equal(rmse_perc, [ 34.14783884, 11.6031684 ], decimal=8)
#now returns res
print(res.llf) #based on MLE, does not include constant
print(res.tvalues)
print(res.pvalues)
print(res.t_test([1,-1,0,0,0,0]))
print('test mean of both random effects variables is zero')
print(res.f_test([[0,0,0,0,1,0], [0,0,0,0,0,1]]))
plots = res.plot_random_univariate(bins=50)
fig = res.plot_scatter_pairs(0, 1)
import matplotlib.pyplot as plt
plt.show()
| bsd-3-clause |
amandersillinois/landlab | landlab/plot/drainage_plot.py | 3 | 3618 | """Plot drainage network."""
import matplotlib.pylab as plt
import numpy as np
from landlab.plot.imshow import imshow_grid
# KRB, FEB 2017.
def drainage_plot(
mg,
surface="topographic__elevation",
receivers=None,
proportions=None,
surf_cmap="gray",
quiver_cmap="viridis",
title="Drainage Plot",
):
if isinstance(surface, str):
colorbar_label = surface
else:
colorbar_label = "topographic_elevation"
imshow_grid(mg, surface, cmap=surf_cmap, colorbar_label=colorbar_label)
if receivers is None:
receivers = mg.at_node["flow__receiver_node"]
if proportions is None:
if "flow__receiver_proportions" in mg.at_node:
proportions = mg.at_node["flow__receiver_proportions"]
else:
receivers = np.asarray(receivers)
if receivers.ndim == 1:
receivers = np.expand_dims(receivers, axis=1)
nreceivers = receivers.shape[-1]
propColor = plt.get_cmap(quiver_cmap)
for j in range(nreceivers):
rec = receivers[:, j]
is_bad = rec == -1
xdist = -0.8 * (mg.x_of_node - mg.x_of_node[rec])
ydist = -0.8 * (mg.y_of_node - mg.y_of_node[rec])
if proportions is None:
proportions = np.ones_like(receivers, dtype=float)
is_bad[proportions[:, j] == 0.0] = True
xdist[is_bad] = np.nan
ydist[is_bad] = np.nan
prop = proportions[:, j] * 256.0
lu = np.floor(prop)
colors = propColor(lu.astype(int))
shape = (mg.number_of_nodes, 1)
plt.quiver(
mg.x_of_node.reshape(shape),
mg.y_of_node.reshape(shape),
xdist.reshape(shape),
ydist.reshape(shape),
color=colors,
angles="xy",
scale_units="xy",
scale=1,
zorder=3,
)
# Plot different types of nodes:
(o,) = plt.plot(
mg.x_of_node[mg.status_at_node == mg.BC_NODE_IS_CORE],
mg.y_of_node[mg.status_at_node == mg.BC_NODE_IS_CORE],
"b.",
label="Core Nodes",
zorder=4,
)
(fv,) = plt.plot(
mg.x_of_node[mg.status_at_node == mg.BC_NODE_IS_FIXED_VALUE],
mg.y_of_node[mg.status_at_node == mg.BC_NODE_IS_FIXED_VALUE],
"c.",
label="Fixed Value Nodes",
zorder=5,
)
(fg,) = plt.plot(
mg.x_of_node[mg.status_at_node == mg.BC_NODE_IS_FIXED_GRADIENT],
mg.y_of_node[mg.status_at_node == mg.BC_NODE_IS_FIXED_GRADIENT],
"g.",
label="Fixed Gradient Nodes",
zorder=6,
)
(c,) = plt.plot(
mg.x_of_node[mg.status_at_node == mg.BC_NODE_IS_CLOSED],
mg.y_of_node[mg.status_at_node == mg.BC_NODE_IS_CLOSED],
"r.",
label="Closed Nodes",
zorder=7,
)
node_id = np.arange(mg.number_of_nodes)
flow_to_self = receivers[:, 0] == node_id
(fts,) = plt.plot(
mg.x_of_node[flow_to_self],
mg.y_of_node[flow_to_self],
"kx",
markersize=6,
label="Flows To Self",
zorder=8,
)
ax = plt.gca()
ax.legend(
labels=[
"Core Nodes",
"Fixed Gradient Nodes",
"Fixed Value Nodes",
"Closed Nodes",
"Flows To Self",
],
handles=[o, fg, fv, c, fts],
numpoints=1,
loc="center left",
bbox_to_anchor=(1.7, 0.5),
)
sm = plt.cm.ScalarMappable(cmap=propColor, norm=plt.Normalize(vmin=0, vmax=1))
sm._A = []
cx = plt.colorbar(sm)
cx.set_label("Proportion of Flow")
plt.title(title)
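# Editor's note (sketch, not from the library): typical usage assumes a grid
# whose nodes carry 'topographic__elevation' and a flow router that has filled
# 'flow__receiver_node'; API details may differ between landlab versions:
#
# from landlab import RasterModelGrid
# from landlab.components import FlowAccumulator
# grid = RasterModelGrid((10, 10))
# grid.add_field('topographic__elevation', grid.x_of_node + grid.y_of_node, at='node')
# FlowAccumulator(grid).run_one_step()
# drainage_plot(grid)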
| mit |
iShoto/testpy | codes/20200105_metric_learning_mnist_query_and_gallery/src/train_mnist_original_center.py | 1 | 5158 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd.function import Function
import torchvision
import matplotlib.pyplot as plt
import argparse
from tqdm import trange
import numpy as np
from sklearn.metrics import classification_report
from losses import CenterLoss
from mnist_net import Net
import mnist_loader
# cf. https://cpp-learning.com/center-loss/
def main():
args = parse_args()
# Device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Dataset
train_loader, test_loader, classes = mnist_loader.load_dataset(args.dataset_dir, img_show=True)
# Model
model = Net().to(device)
print(model)
# Loss
nllloss = nn.NLLLoss().to(device) # CrossEntropyLoss = log_softmax + NLLLoss
loss_weight = 1
centerloss = CenterLoss(10, 2).to(device)
# Optimizer
dnn_optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0005)
scheduler = lr_scheduler.StepLR(dnn_optimizer, 20, gamma=0.8)
center_optimizer = optim.SGD(centerloss.parameters(), lr=0.5)
print('Start training...')
for epoch in range(100):
# Update parameters.
epoch += 1
scheduler.step()
# Train and test a model.
train_acc, train_loss, feat, labels = train(device, train_loader, model, nllloss, loss_weight, centerloss, dnn_optimizer, center_optimizer)
test_acc, test_loss = test(device, test_loader, model, nllloss, loss_weight, centerloss)
stdout_temp = 'Epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}'
print(stdout_temp.format(epoch, train_acc, train_loss, test_acc, test_loss))
# Visualize features of each class.
vis_img_path = args.vis_img_path_temp.format(str(epoch).zfill(3))
visualize(feat.data.cpu().numpy(), labels.data.cpu().numpy(), epoch, vis_img_path)
# Save a trained model.
model_path = args.model_path_temp.format(str(epoch).zfill(3))
torch.save(model.state_dict(), model_path)
def train(device, train_loader, model, nllloss, loss_weight, centerloss, dnn_optimizer, center_optimizer):
running_loss = 0.0
pred_list = []
label_list = []
ip1_loader = []
idx_loader = []
model.train()
for i,(imgs, labels) in enumerate(train_loader):
# Set batch data.
imgs, labels = imgs.to(device), labels.to(device)
# Predict labels.
ip1, pred = model(imgs)
# Calculate loss.
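# Joint objective: log-softmax + NLL (i.e. cross-entropy) keeps the classes
# separable, while the center-loss term (scaled by loss_weight) pulls each
# 2-D embedding ip1 toward its class center.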
loss = nllloss(pred, labels) + loss_weight * centerloss(labels, ip1)
# Initialize gradients.
dnn_optimizer.zero_grad()
center_optimizer.zero_grad()
# Calculate gradient.
loss.backward()
# Update parameters.
dnn_optimizer.step()
center_optimizer.step()
# For calculation.
running_loss += loss.item()
pred_list += [int(p.argmax()) for p in pred]
label_list += [int(l) for l in labels]
# For visualization.
ip1_loader.append(ip1)
idx_loader.append((labels))
result = classification_report(pred_list, label_list, output_dict=True)
train_acc = round(result['weighted avg']['f1-score'], 6)
train_loss = round(running_loss / len(train_loader.dataset), 6)
feat = torch.cat(ip1_loader, 0)
labels = torch.cat(idx_loader, 0)
return train_acc, train_loss, feat, labels
def test(device, test_loader, model, nllloss, loss_weight, centerloss):
model = model.eval()
# Prediction
running_loss = 0.0
pred_list = []
label_list = []
with torch.no_grad():
for i,(imgs, labels) in enumerate(test_loader):
# Set batch data.
imgs, labels = imgs.to(device), labels.to(device)
# Predict labels.
ip1, pred = model(imgs)
# Calculate loss.
loss = nllloss(pred, labels) + loss_weight * centerloss(labels, ip1)
# Append predictions and labels.
running_loss += loss.item()
pred_list += [int(p.argmax()) for p in pred]
label_list += [int(l) for l in labels]
# Calculate accuracy.
result = classification_report(pred_list, label_list, output_dict=True)
test_acc = round(result['weighted avg']['f1-score'], 6)
test_loss = round(running_loss / len(test_loader.dataset), 6)
return test_acc, test_loss
def visualize(feat, labels, epoch, vis_img_path):
colors = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',
'#ff00ff', '#990000', '#999900', '#009900', '#009999']
plt.figure()
for i in range(10):
plt.plot(feat[labels==i, 0], feat[labels==i, 1], '.', color=colors[i])
plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], loc='best')
plt.xlim(left=-8, right=8)
plt.ylim(bottom=-8, top=8)
plt.text(-7.8, 7.3, "epoch=%d" % epoch)
plt.savefig(vis_img_path)
plt.clf()
def parse_args():
arg_parser = argparse.ArgumentParser(description="parser for focus one")
arg_parser.add_argument("--dataset_dir", type=str, default='D:/workspace/datasets')
arg_parser.add_argument("--model_path_temp", type=str, default='../outputs/models/checkpoints/mnist_original_softmax_center_epoch_{}.pth')
arg_parser.add_argument("--vis_img_path_temp", type=str, default='../outputs/visual/epoch_{}.png')
args = arg_parser.parse_args()
return args
if __name__ == "__main__":
main()
| mit |
nabobalis/PhDThesis | Chapter2/Code/figuires.py | 1 | 18207 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 4 12:00:30 2015
@author: nabobalis
"""
from __future__ import division
import pycwt as wavelet
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from scipy.io import readsav
import scipy.fftpack as fftpack
import matplotlib.animation as anim
import glob
from matplotlib.image import NonUniformImage
from scipy.stats import pearsonr
from scipy.signal import detrend
def area_inten(bound,lim_inten):
area = np.zeros([bound.shape[0]])
inten = np.zeros([bound.shape[0]])
pore = np.zeros(bound.shape,dtype=np.int)
for i in range(0,bound.shape[0]):
pore[i] = (bound[i] <= lim_inten[i])
area[i] = len(pore[i].nonzero()[0])
inten[i] = np.sum(bound[i][pore[i].nonzero()])
return area, inten
def find_closest(array, target):
idx = array.searchsorted(target) #array must be sorted
idx = np.clip(idx, 1, len(array)-1)
left = array[idx-1]
right = array[idx]
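# The boolean (target - left < right - target) is True where the left
# neighbour is closer; subtracting it shifts those indices down by one,
# so idx points at the nearest value rather than the insertion point.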
idx -= target - left < right - target
return idx
def wavel(signal,cadence):
mother='morlet'
sig_level = 0.95
#/ signal.std()
t1 = np.linspace(0,cadence*signal.size,signal.size)
wave, scales, freqs, coi = wavelet.cwt((signal - signal.mean()),cadence,
wavelet=mother, dj=1/100.)
power = (np.abs(wave)) ** 2 # / scales[:,None]
period = 1/freqs
alpha = 0.0
## (variance=1 for the normalized SST)
signif, fft_theor = wavelet.significance(signal, period, scales, 0, alpha,
significance_level=sig_level, wavelet=mother)
sig95 = np.ones([1, signal.size]) * signif[:, None]
sig95 = power / sig95
## index of the longest period still inside the cone of influence
idx = find_closest(period,coi.max())
## Into minutes
t1 /= 60
period /= 60
coi /= 60
return wave,scales,sig95,idx,t1,coi,period,power
def cross_wavelet(signal_1, signal_2, period, mother='morlet'):
signal_1 = (signal_1 - signal_1.mean()) / signal_1.std() # Normalizing
signal_2 = (signal_2 - signal_2.mean()) / signal_2.std() # Normalizing
W12, cross_coi, freq, signif = wavelet.xwt(signal_1, signal_2, period, dj=1/100, s0=-1, J=-1,
significance_level=0.95, wavelet=mother,
normalize=True)
cross_power = np.abs(W12)**2
cross_sig = np.ones([1, signal_1.size]) * signif[:, None]
cross_sig = cross_power / cross_sig
cross_period = 1/freq
WCT, aWCT, corr_coi, freq, sig = wavelet.wct(signal_1, signal_2, period, dj=1/100, s0=-1, J=-1,
sig=False,significance_level=0.95, wavelet=mother,
normalize=True)
cor_sig = np.ones([1, signal_1.size]) * sig[:, None]
cor_sig = np.abs(WCT) / cor_sig
cor_period = 1/freq
t1 = np.linspace(0,period*signal_1.size,signal_1.size)
idx = find_closest(cor_period,corr_coi.max())
t1 /= 60
cross_period /= 60
cor_period /= 60
cross_coi /= 60
corr_coi /= 60
return W12,WCT,aWCT,cor_period,corr_coi,cor_sig,idx,t1
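# Note (editor): aWCT is the local phase difference between the two signals;
# at period P a phase angle dphi corresponds to a time lag of roughly
# dphi * P / (2 * np.pi), in the same units as P.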
"""
Figure of data analysis example
"""
### Creation of Signal and Saved
time = np.linspace(0,1000,1000)
data = np.sin(2*np.pi*time/5) + np.cos(2*np.pi*time/10)# + 5*np.random.rand(1000)
#np.savetxt('/home/nabobalis/GitRepos/PhDThesis/Chapter2/Code/test_sig.txt',[time,data])
#
#load = np.loadtxt('/home/nabobalis/GitRepos/PhDThesis/Chapter2/Code/test_sig.txt')
#time = load[0]
#data = load[1]
dt = time[1] - time[0]
# FFT
fft = fftpack.fft(data-np.mean(data))/time.shape[0]
freq = fftpack.fftfreq(data.shape[0],1)
fft_power = np.abs(fft)**2
# Wavelet
wave,scales,sig95,idx,t1,coi,period,power = wavel(data-data.mean(),dt)
# EMD
emd_data = np.loadtxt('/home/nabobalis/GitRepos/PhDThesis/Chapter2/Code/emd_sig.csv', delimiter=',')
# Plotting
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(figsize=(12,8),nrows=2, ncols=2)
ax1.plot(time/60, data, 'k', linewidth=1.5)
ax1.set_xlabel('Time (minutes)')
ax1.set_ylabel('Amplitude (No Units)')
ax1.set_xlim(time.min()/60,time.max()/60)
ax2.plot(freq[0:len(time)/2],fft_power[0:len(time)/2])
ax2.set_xlabel('Frequency (Hertz)')
ax2.set_ylabel('Power (No Units)')
ax2.axis((0.0, 0.5, 0.0, 0.3))
extent = [time.min(),time.max(),0,max(period)]
im = NonUniformImage(ax3, interpolation='nearest', extent=extent)
im.set_cmap('cubehelix_r')
im.set_data(t1, period[:idx], power[:idx,:])
ax3.images.append(im)
ax3.set_ylabel('Period (minutes)')
ax3.set_xlabel('Time (minutes)')
ax3.contour(t1, period[:idx], sig95[:idx,:], [-99,1], colors='k', linewidths=2, extent=extent)
ax3.fill(np.concatenate([t1, t1[-1:]+dt, t1[-1:]+dt,t1[:1]-dt, t1[:1]-dt]),
(np.concatenate([coi,[1e-9], period[-1:], period[-1:], [1e-9]])),
'k', alpha=0.3,hatch='/', zorder=100, antialiased=True, rasterized=True)
ax3.set_xlim(t1.min(),t1.max())
ax3.set_ylim(([min(period), period[idx]]))
ax3.axis((0.0, 16.673398306304815, 0.033366700033366704, 1.2766635538646571))
ax4.plot(time/60, emd_data[:,3], 'k', linewidth=1.5)
ax4.set_xlabel('Time (minutes)')
ax4.set_ylabel('Amplitude (No Units)')
ax4.set_xlim(time.min()/60,time.max()/60)
fig.tight_layout()
plt.show()
#plt.savefig('/home/nabobalis/GitRepos/PhDThesis/Chapter2/Figs/signal_overview.pdf',dpi=300,bbox_inches='tight')
"""
Data Sources
"""
## Sunspot
#sun_data = readsav('/home/nabobalis/Data/Reduced_data_blue.sav')['blue_spk_cor']
#sun_dt = 6.8
#sun_pixel_arc = 0.097
#sun_bounding_box = [318,636,386,694]
#sun_background_box = [300,700,100,400]
#sun__full_extent = [0,sun_pixel_arc*sun_data.shape[1],0,sun_pixel_arc*sun_data.shape[2]]
#sun_cut_extent = [sun_bounding_box[0]*sun_pixel_arc,sun_bounding_box[1]*sun_pixel_arc,
# sun_bounding_box[2]*sun_pixel_arc,sun_bounding_box[3]*sun_pixel_arc]
#
## Pore
#pore_data = fits.getdata('/home/nabobalis/Data/gband_pore.fits')
#pore_bounding_box = [450,700,550,750]
#pore_background_box = [300,550,100,350]
#pore_dt = 2.11
#pore_pixel_arc = 0.0968063872255489 # lol
#pore__full_extent = [0,pore_pixel_arc*pore_data.shape[1],0,pore_pixel_arc*pore_data.shape[2]]
#pore_cut_extent = [pore_bounding_box[0]*pore_pixel_arc,pore_bounding_box[1]*pore_pixel_arc,
# pore_bounding_box[2]*pore_pixel_arc,pore_bounding_box[3]*pore_pixel_arc]
"""
Overview plot of the waveguides.
"""
#fig, (ax1, ax2) = plt.subplots(1,2)
#
#ax1.imshow(sun_data[0], origin='lower',interpolation='nearest',extent=sun__full_extent, cmap=plt.get_cmap('afmhot'))
#ax1.set_xlabel('Distance (Arcseconds)')
#ax1.set_ylabel('Distance (Arcseconds)')
#ax1.axes.axis((16.827111807533818, 77.654734496225558, 24.186586938884577, 84.019074696063981))
#
#ax2.imshow(pore_data[0], origin='lower',interpolation='nearest',extent=pore__full_extent, cmap=plt.get_cmap('gray'))
#ax2.set_xlabel('Distance (Arcseconds)')
#ax2.set_ylabel('Distance (Arcseconds)')
#ax2.axes.axis((23.335643499752834, 89.270284752885019, 24.592770410202537, 91.649173045713155))
#
#fig.tight_layout()
#plt.savefig('/home/nabobalis/GitRepos/PhDThesis/Chapter2/Figs/overview.pdf',dpi=300,bbox_inches='tight')
#####################################
"""
This is need for the following parts!
"""
#####################################
#sun_bound = sun_data[:,sun_bounding_box[2]:sun_bounding_box[3],sun_bounding_box[0]:sun_bounding_box[1]]
#sun_cut_box = sun_data[:,sun_background_box[2]:sun_background_box[3],sun_background_box[0]:sun_background_box[1]]
#sun_cut = sun_cut_box.reshape(sun_cut_box.shape[0],sun_cut_box.shape[1]*sun_cut_box.shape[2])
#sun_time = np.linspace(0,sun_data.shape[0]*sun_dt,sun_data.shape[0])
#
#pore_bound = pore_data[:,pore_bounding_box[2]:pore_bounding_box[3],pore_bounding_box[0]:pore_bounding_box[1]]
#pore_cut_box = pore_data[:,pore_background_box[2]:pore_background_box[3],pore_background_box[0]:pore_background_box[1]]
#pore_cut = pore_cut_box.reshape(pore_cut_box.shape[0],pore_cut_box.shape[1]*pore_cut_box.shape[2])
#pore_time = np.linspace(0,pore_data.shape[0]*pore_dt,pore_data.shape[0])
#
#sun_lim_list = [3,3.5,4,4.5]
#pore_lim_list = [2,2.5,3,3.5]
#color = ['Blue','Green','Purple', 'Orange']
#sun_lim = []
#pore_lim = []
#sun_area = []
#pore_area = []
#sun_inten = []
#pore_inten = []
#
#for slim, plim in zip(sun_lim_list,pore_lim_list):
# sun_lim_inten = np.mean(sun_cut, axis = 1) - slim*np.std(sun_cut, axis = 1)
# pore_lim_inten = np.mean(pore_cut, axis = 1) - plim*np.std(pore_cut, axis = 1)
# s_area, s_inten = area_inten(sun_bound,sun_lim_inten)
# p_area, p_inten = area_inten(pore_bound,pore_lim_inten)
# sun_lim.append(sun_lim_inten)
# pore_lim.append(pore_lim_inten)
# sun_area.append(s_area)
# sun_inten.append(s_inten)
# pore_area.append(p_area)
# pore_inten.append(p_inten)
"""
Overview of Method
"""
#idx = 1
#fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(figsize=(12,8),nrows=2, ncols=2)
#
##Sunspot
#ax1.imshow(sun_bound[idx], origin = 'lower', interpolation = 'nearest', cmap=plt.get_cmap('gray'), extent=sun_cut_extent)
#ax1.set_xlabel('Distance (Arcseconds)')
#ax1.set_ylabel('Distance (Arcseconds)')
#
#for i,li in enumerate(sun_lim):
# ax1.contour(sun_bound[idx] <= li[idx], origin = 'lower', interpolation = 'nearest', colors=color[i], extent=sun_cut_extent, levels=[0,1])
#
#ax2.hist([sun_bound[idx].flatten(),sun_cut_box[idx].flatten()],bins=250, color= ['Red', 'Orange'],
# label=['Boundary', 'Background'], stacked=True, fill=True, edgecolor = 'none')
#ax2.set_xlim(1000,4500)
#ax2.set_ylim(0,10000)
#for i,li in enumerate(sun_lim):
# ax2.axvline(li[idx], color=color[i], linestyle='dashed', linewidth=2)
#
#ax2.set_xlabel('Intensity bins (counts)')
#ax2.set_ylabel('Number of pixels')
##ax2.locator_params(axis='x', nbins=6)
#ax2.legend()
#
##Pore
#ax3.imshow(pore_bound[idx], origin = 'lower', interpolation = 'nearest', cmap=plt.get_cmap('gray'), extent=pore_cut_extent)
#ax3.set_xlabel('Distance (Arcseconds)')
#ax3.set_ylabel('Distance (Arcseconds)')
#
#for i,li in enumerate(pore_lim):
# ax3.contour(pore_bound[idx] <= li[idx], origin = 'lower', interpolation = 'nearest', colors=color[i], extent=pore_cut_extent, levels=[0,1])
#
#ax4.hist([pore_bound[idx].flatten(),pore_cut_box[idx].flatten()],bins=250, color= ['Red', 'Orange'],
# label=['Boundary', 'Background'], stacked=True, fill=True, edgecolor = 'none')
#ax4.set_xlim(0.2,1.6)
#ax4.set_ylim(0,2000)
#for i,li in enumerate(pore_lim):
# ax4.axvline(li[idx], color=color[i], linestyle='dashed', linewidth=2)
#
#ax4.set_xlabel('Intensity bins (counts)')
#ax4.set_ylabel('Number of pixels')
#ax4.legend()
#
#fig.tight_layout()
#plt.savefig('/home/nabobalis/GitRepos/PhDThesis/Chapter2/Figs/method_overview.pdf',dpi=300,bbox_inches='tight')
"""
Wavelet of Signals
"""
##Sunspot
#wave,scales,sig95,idx,t1,coi,period,power = wavel(sun_area[0],sun_dt)
#wave2,scales2,sig952,idx2,t12,coi2,period2,power2 = wavel(sun_area[-1],sun_dt)
#
#fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(figsize=(12,8),nrows=2, ncols=2)
## First Signal
#ax1.plot(t1, sun_area[0], 'k', linewidth=1.5)
#ax1.set_xlabel('Time (minutes)')
#ax1.set_ylabel('Area (Pixels)')
#extent = [t1.min(),t1.max(),0,max(period)]
#im = NonUniformImage(ax3, interpolation='nearest', extent=extent)
#im.set_cmap('cubehelix_r')
#im.set_data(t1, period[:idx], power[:idx,:])
#ax3.images.append(im)
#ax3.set_ylabel('Period (minutes)')
#ax3.set_xlabel('Time (minutes)')
#ax3.contour(t1, period[:idx], sig95[:idx,:], [-99,1], colors='k', linewidths=2, extent=extent)
#ax3.fill(np.concatenate([t1, t1[-1:]+sun_dt, t1[-1:]+sun_dt,t1[:1]-sun_dt, t1[:1]-sun_dt]),
# (np.concatenate([coi,[1e-9], period[-1:], period[-1:], [1e-9]])),
# 'k', alpha=0.3,hatch='/', zorder=100, antialiased=True, rasterized=True)
#ax3.set_xlim(t1.min(),t1.max())
#ax3.set_ylim(([min(period), period[idx]]))
## Second Signal
#ax2.plot(t1, sun_area[-1], 'k', linewidth=1.5)
#ax2.set_xlabel('Time (minutes)')
#ax2.set_ylabel('Area (Pixels)')
#extent = [t1.min(),t1.max(),0,max(period2)]
#im = NonUniformImage(ax4, interpolation='nearest', extent=extent)
#im.set_cmap('cubehelix_r')
#im.set_data(t1, period2[:idx], power2[:idx,:])
#ax4.images.append(im)
#ax4.set_ylabel('Period (minutes)')
#ax4.set_xlabel('Time (minutes)')
#ax4.contour(t1, period2[:idx], sig952[:idx,:], [-99,1], colors='k', linewidths=2, extent=extent)
#ax4.fill(np.concatenate([t1, t1[-1:]+sun_dt, t1[-1:]+sun_dt,t1[:1]-sun_dt, t1[:1]-sun_dt]),
# (np.concatenate([coi,[1e-9], period[-1:], period[-1:], [1e-9]])),
# 'k', alpha=0.3,hatch='/', zorder=100, antialiased=True, rasterized=True)
#ax4.set_xlim(t1.min(),t1.max())
#ax4.set_ylim(([min(period), period[idx]]))
#
#fig.tight_layout()
#plt.savefig('/home/nabobalis/GitRepos/PhDThesis/Chapter2/Figs/sunspot_wavelet.pdf',dpi=300,bbox_inches='tight')
#
##Pore
#wave,scales,sig95,idx,t1,coi,period,power = wavel(pore_area[0],pore_dt)
#wave2,scales2,sig952,idx2,t12,coi2,period2,power2 = wavel(pore_area[-1],pore_dt)
#
#fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(figsize=(12,8),nrows=2, ncols=2)
## First Signal
#ax1.plot(t1, pore_area[0], 'k', linewidth=1.5)
#ax1.set_xlabel('Time (minutes)')
#ax1.set_ylabel('Area (Pixels)')
#extent = [t1.min(),t1.max(),0,max(period)]
#im = NonUniformImage(ax3, interpolation='nearest', extent=extent)
#im.set_cmap('cubehelix_r')
#im.set_data(t1, period[:idx], power[:idx,:])
#ax3.images.append(im)
#ax3.set_ylabel('Period (minutes)')
#ax3.set_xlabel('Time (minutes)')
#ax3.contour(t1, period[:idx], sig95[:idx,:], [-99,1], colors='k', linewidths=2, extent=extent)
#ax3.fill(np.concatenate([t1, t1[-1:]+pore_dt, t1[-1:]+pore_dt,t1[:1]-pore_dt, t1[:1]-pore_dt]),
# (np.concatenate([coi,[1e-9], period[-1:], period[-1:], [1e-9]])),
# 'k', alpha=0.3,hatch='/', zorder=100, antialiased=True, rasterized=True)
#ax3.set_xlim(t1.min(),t1.max())
#ax3.set_ylim(([min(period), period[idx]]))
## Second Signal
#ax2.plot(t1, pore_area[-1], 'k', linewidth=1.5)
#ax2.set_xlabel('Time (minutes)')
#ax2.set_ylabel('Area (Pixels)')
#extent = [t1.min(),t1.max(),0,max(period2)]
#im = NonUniformImage(ax4, interpolation='nearest', extent=extent)
#im.set_cmap('cubehelix_r')
#im.set_data(t1, period2[:idx], power2[:idx,:])
#ax4.images.append(im)
#ax4.set_ylabel('Period (minutes)')
#ax4.set_xlabel('Time (minutes)')
#ax4.contour(t1, period2[:idx], sig952[:idx,:], [-99,1], colors='k', linewidths=2, extent=extent)
#ax4.fill(np.concatenate([t1, t1[-1:]+pore_dt, t1[-1:]+pore_dt,t1[:1]-pore_dt, t1[:1]-pore_dt]),
# (np.concatenate([coi,[1e-9], period[-1:], period[-1:], [1e-9]])),
# 'k', alpha=0.3,hatch='/', zorder=100, antialiased=True, rasterized=True)
#ax4.set_xlim(t1.min(),t1.max())
#ax4.set_ylim(([min(period), period[idx]]))
#
#fig.tight_layout()
#plt.savefig('/home/nabobalis/GitRepos/PhDThesis/Chapter2/Figs/pore_wavelet.pdf',dpi=300,bbox_inches='tight')
"""
Phase of Signals
"""
##Sunspot
#fig, (ax1, ax2) = plt.subplots(figsize=(20,6),nrows=1, ncols=2, sharey=False)
#
#W12,WCT,aWCT,cor_period,corr_coi,cor_sig,idx,t1 = cross_wavelet(sun_area[0], sun_inten[0], sun_dt, mother='morlet')
#W12_,WCT_,aWCT_,cor_period_,corr_coi_,cor_sig_,idx_,t1_ = cross_wavelet(sun_area[-1], sun_inten[-1], sun_dt, mother='morlet')
#extent_corr = [t1.min(),t1.max(),0,max(cor_period)]
#
#im = ax1.imshow(np.rad2deg(aWCT), origin='lower',interpolation='nearest', cmap='RdBu', extent=extent_corr)
#ax1.fill(np.concatenate([t1, t1[-1:]+sun_dt, t1[-1:]+sun_dt,t1[:1]-sun_dt, t1[:1]-sun_dt]),
# (np.concatenate([corr_coi,[1e-9], cor_period[-1:], cor_period[-1:], [1e-9]])),
# 'k', alpha=0.3,hatch='x')
#ax1.set_ylim(([min(cor_period), cor_period[idx]]))
#ax1.set_xlim(t1.min(),t1.max())
#ax1.set_ylabel('Period (minutes)')
#ax1.set_xlabel('Time (minutes)')
#
#im_ = ax2.imshow(np.rad2deg(aWCT_), origin='lower',interpolation='nearest', cmap='RdBu', extent=extent_corr)
#ax2.fill(np.concatenate([t1, t1[-1:]+sun_dt, t1[-1:]+sun_dt,t1[:1]-sun_dt, t1[:1]-sun_dt]),
# (np.concatenate([corr_coi,[1e-9], cor_period[-1:], cor_period[-1:], [1e-9]])),
# 'k', alpha=0.3,hatch='x')
#ax2.set_ylim(([min(cor_period), cor_period[idx]]))
#ax2.set_xlim(t1.min(),t1.max())
#ax2.set_ylabel('Period (minutes)')
#ax2.set_xlabel('Time (minutes)')
#cbar = fig.colorbar(im_, orientation='vertical')
#cbar.solids.set_edgecolor("face")
#ax1.set_aspect('auto')
#ax2.set_aspect('auto')
#fig.tight_layout()
#plt.savefig('/home/nabobalis/GitRepos/PhDThesis/Chapter2/Figs/sunspot_phase.pdf',dpi=300,bbox_inches='tight')
#
##Pore
#fig, (ax1, ax2) = plt.subplots(figsize=(20,6),nrows=1, ncols=2, sharey=False)
#
#W12,WCT,aWCT,cor_period,corr_coi,cor_sig,idx,t1 = cross_wavelet(pore_area[0], pore_inten[0], sun_dt, mother='morlet')
#W12_,WCT_,aWCT_,cor_period_,corr_coi_,cor_sig_,idx_,t1_ = cross_wavelet(pore_area[-1], pore_inten[-1], sun_dt, mother='morlet')
#extent_corr = [t1.min(),t1.max(),0,max(cor_period)]
#
#im = ax1.imshow(np.rad2deg(aWCT), origin='lower',interpolation='nearest', cmap='RdBu', extent=extent_corr)
#ax1.fill(np.concatenate([t1, t1[-1:]+pore_dt, t1[-1:]+pore_dt,t1[:1]-pore_dt, t1[:1]-pore_dt]),
# (np.concatenate([corr_coi,[1e-9], cor_period[-1:], cor_period[-1:], [1e-9]])),
# 'k', alpha=0.3,hatch='x')
#ax1.set_ylim(([min(cor_period), cor_period[idx]]))
#ax1.set_xlim(t1.min(),t1.max())
#ax1.set_ylabel('Period (minutes)')
#ax1.set_xlabel('Time (minutes)')
#
#im_ = ax2.imshow(np.rad2deg(aWCT_), origin='lower',interpolation='nearest', cmap='RdBu', extent=extent_corr)
#ax2.fill(np.concatenate([t1, t1[-1:]+pore_dt, t1[-1:]+pore_dt,t1[:1]-pore_dt, t1[:1]-pore_dt]),
# (np.concatenate([corr_coi,[1e-9], cor_period[-1:], cor_period[-1:], [1e-9]])),
# 'k', alpha=0.3,hatch='x')
#ax2.set_ylim(([min(cor_period), cor_period[idx]]))
#ax2.set_xlim(t1.min(),t1.max())
#ax2.set_ylabel('Period (minutes)')
#ax2.set_xlabel('Time (minutes)')
#cbar = fig.colorbar(im_, orientation='vertical')
#cbar.solids.set_edgecolor("face")
#ax1.set_aspect('auto')
#ax2.set_aspect('auto')
#fig.tight_layout()
#plt.savefig('/home/nabobalis/GitRepos/PhDThesis/Chapter2/Figs/pore_phase.pdf',dpi=300,bbox_inches='tight') | mit |
ocefpaf/ulmo | test/usgs_nwis_hdf5_test.py | 1 | 24191 | from builtins import range
import os
import shutil
import freezegun
import pandas
import pytest
from ulmo.usgs import nwis
import test_util
TEST_FILE_DIR = os.path.abspath('tmp')
@pytest.fixture
def test_file_path(request):
return os.path.join(TEST_FILE_DIR, request.function.__name__)
def setup_module(module):
if os.path.exists(TEST_FILE_DIR):
shutil.rmtree(TEST_FILE_DIR)
os.makedirs(TEST_FILE_DIR)
def teardown_module(module):
shutil.rmtree(TEST_FILE_DIR)
def test_update_site_list(test_file_path):
site_files = [
os.path.join('usgs','nwis', 'RI_daily.xml'),
os.path.join('usgs','nwis', 'RI_instantaneous.xml'),
]
for site_file in site_files:
test_site_file = test_util.get_test_file_path(site_file)
nwis.hdf5.update_site_list(path=test_file_path,
input_file=test_site_file, autorepack=False)
sites = nwis.hdf5.get_sites(test_file_path)
assert len(sites) == 64
test_sites = {
# uses_dst == False
'01111410': {
'agency': 'USGS',
'code': '01111410',
'county': '44007',
'huc': '01090003',
'location': {
'latitude': '41.9409318',
'longitude': '-71.6481214',
'srs': 'EPSG:4326'
},
'name': 'CHEPACHET RIVER WEST OF GAZZA RD AT GAZZAVILLE, RI',
'state_code': '44',
'network': 'NWIS',
'site_type': 'ST',
'timezone_info': {
'default_tz': {
'abbreviation': 'EST',
'offset': '-05:00'
},
'dst_tz': {
'abbreviation': 'EDT',
'offset': '-04:00',
},
'uses_dst': False,
}
},
# only in RI_daily
'01116300': {
'agency': 'USGS',
'code': '01116300',
'county': '44007',
'huc': '01090004',
'location': {
'latitude': '41.7564892',
'longitude': '-71.4972824',
'srs': 'EPSG:4326'
},
'name': 'FURNACE HILL BROOK AT CRANSTON, RI',
'network': 'NWIS',
'site_type': 'ST',
'state_code': '44',
'timezone_info': {
'default_tz': {'abbreviation': 'EST', 'offset': '-05:00'},
'dst_tz': {'abbreviation': 'EDT', 'offset': '-04:00'},
'uses_dst': True
},
},
# only in RI_instantaneous
'01115170': {
'agency': 'USGS',
'code': '01115170',
'county': '44007',
'huc': '01090004',
'location': {
'latitude': '41.84093269',
'longitude': '-71.584508',
'srs': 'EPSG:4326',
},
'name': 'MOSWANSICUT STREAM NR NORTH SCITUATE, RI',
'network': 'NWIS',
'site_type': 'ST',
'state_code': '44',
'timezone_info': {
'default_tz': {'abbreviation': 'EST', 'offset': '-05:00'},
'dst_tz': {'abbreviation': 'EDT', 'offset': '-04:00'},
'uses_dst': True
},
},
}
for test_code, test_value in test_sites.items():
assert sites[test_code] == test_value
def test_update_site_list_with_changes(test_file_path):
site_files = [
(os.path.join('usgs','nwis', 'RI_daily.xml'), {
'agency': 'USGS',
'code': '01106000',
'county': '44005',
'huc': '01090002',
'location': {'latitude': '41.5584366',
'longitude': '-71.12921047',
'srs': 'EPSG:4326'},
'name': 'ADAMSVILLE BROOK AT ADAMSVILLE, RI',
'network': 'NWIS',
'site_type': 'ST',
'state_code': '44',
'timezone_info': {
'default_tz': {'abbreviation': 'EST', 'offset': '-05:00'},
'dst_tz': {'abbreviation': 'EDT', 'offset': '-04:00'},
'uses_dst': True}}),
(os.path.join('usgs','nwis', 'RI_daily_update.xml'), {
'agency': 'USGS',
'code': '01106000',
'county': '44005',
'huc': '01090002',
'location': {'latitude': '41.5584366',
'longitude': '-71.12921047',
'srs': 'EPSG:4326'},
'name': 'UPDATED NAME',
'network': 'NWIS',
'site_type': 'ST',
'state_code': '44',
'timezone_info': {
'default_tz': {'abbreviation': 'EST', 'offset': '-05:00'},
'dst_tz': {'abbreviation': 'EDT', 'offset': '-04:00'},
'uses_dst': True}}),
]
for test_file, test_site in site_files:
test_site_file = test_util.get_test_file_path(test_file)
nwis.hdf5.update_site_list(path=test_file_path,
input_file=test_site_file, autorepack=False)
sites = nwis.hdf5.get_sites(path=test_file_path)
test_code = test_site['code']
assert sites[test_code] == test_site
def test_sites_table_remains_unique(test_file_path):
test_file_path = os.path.join(test_file_path, "test.h5")
site_files = [
os.path.join('usgs','nwis', 'RI_daily.xml'),
os.path.join('usgs','nwis', 'RI_instantaneous.xml'),
]
for site_file in site_files:
test_site_file = test_util.get_test_file_path(site_file)
nwis.hdf5.update_site_list(path=test_file_path,
input_file=test_site_file, autorepack=False)
with pandas.io.pytables.get_store(test_file_path) as store:
sites_df = store.select('sites')
assert len(sites_df) == len(set(sites_df.index))
def test_get_site(test_file_path):
site_code = '08068500'
site_data_file = os.path.join('usgs','nwis', 'site_%s_daily.xml' % site_code)
input_file = test_util.get_test_file_path(site_data_file)
nwis.hdf5.update_site_list(path=test_file_path,
input_file=input_file, autorepack=False)
site = nwis.hdf5.get_site(site_code, path=test_file_path)
assert site == {
'agency': 'USGS',
'code': '08068500',
'county': '48339',
'huc': '12040102',
'location': {
'latitude': '30.11049517',
'longitude': '-95.4363275',
'srs': 'EPSG:4326'
},
'name': 'Spring Ck nr Spring, TX',
'network': 'NWIS',
'site_type': 'ST',
'state_code': '48',
'timezone_info': {
'default_tz': {'abbreviation': 'CST', 'offset': '-06:00'},
'dst_tz': {'abbreviation': 'CDT', 'offset': '-05:00'},
'uses_dst': True
},
}
def test_get_sites_isnt_cached_between_calls(test_file_path):
test_file_path = os.path.join(test_file_path, "test.h5")
site_data_file = os.path.join('usgs', 'nwis', 'RI_daily.xml')
input_file = test_util.get_test_file_path(site_data_file)
nwis.hdf5.update_site_list(input_file=input_file, path=test_file_path,
autorepack=False)
sites = nwis.hdf5.get_sites(path=test_file_path)
assert len(sites) > 0
if os.path.exists(test_file_path):
os.remove(test_file_path)
sites = nwis.hdf5.get_sites(path=test_file_path)
assert len(sites) == 0
def test_empty_update_list_doesnt_error(test_file_path):
site_code = '98068500'
site_data_file = os.path.join('usgs','nwis', 'site_%s_daily.xml' % site_code)
input_file = test_util.get_test_file_path(site_data_file)
sites = nwis.hdf5.get_sites(path=test_file_path)
assert sites == {}
nwis.hdf5.update_site_list(path=test_file_path,
input_file=input_file, autorepack=False)
sites = nwis.hdf5.get_sites(path=test_file_path)
assert sites == {}
def test_get_site_for_missing_raises_lookup(test_file_path):
site_code = '08068500'
site_data_file = os.path.join('usgs','nwis', 'site_%s_daily.xml' % site_code)
input_file = test_util.get_test_file_path(site_data_file)
nwis.hdf5.update_site_list(path=test_file_path,
input_file=input_file, autorepack=False)
with pytest.raises(LookupError):
missing_code = '98068500'
nwis.hdf5.get_site(missing_code, path=test_file_path)
def test_non_usgs_site(test_file_path):
site_code = '07335390'
site_data_file = test_util.get_test_file_path(
os.path.join('usgs','nwis', 'site_%s_instantaneous.xml' % site_code))
nwis.hdf5.update_site_data(site_code, period='all',
path=test_file_path, input_file=site_data_file, autorepack=False)
site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
assert len(site_data['00062:00011']['values']) > 1000
def test_remove_values(test_file_path):
from datetime import datetime
site_code = '07335390'
parameter_code = '00062:00011'
values_to_remove = {
parameter_code: ['2012-10-25 06:00', '2012-10-25 23:00',
'2012-10-30 15:00:00', datetime(2012, 11, 15, 13)]
}
site_data_file = test_util.get_test_file_path(
os.path.join('usgs','nwis', 'site_%s_instantaneous.xml' % site_code))
nwis.hdf5.update_site_data(site_code, period='all',
path=test_file_path, input_file=site_data_file, autorepack=False)
nwis.hdf5.remove_values(site_code, values_to_remove, path=test_file_path,
autorepack=False)
test_values = [
dict(datetime="2012-10-25T01:00:00-05:00", last_checked=None, last_modified=None, qualifiers="P", value=None),
dict(datetime="2012-10-25T18:00:00-05:00", last_checked=None, last_modified=None, qualifiers="P", value=None),
dict(datetime="2012-10-30T10:00:00-05:00", last_checked=None, last_modified=None, qualifiers="P", value=None),
dict(datetime="2012-11-15T07:00:00-06:00", last_checked=None, last_modified=None, qualifiers="P", value=None),
]
site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
site_values = site_data[parameter_code]['values']
for test_value in test_values:
assert test_value in site_values
def test_remove_values_with_missing_code(test_file_path):
site_code = '08068500'
values_to_remove = {
'12345:0000': ['2010-01-01'],
'00010:00002': ['2012-12-10']
}
site_data_file = test_util.get_test_file_path(os.path.join('usgs','nwis', 'site_%s_daily.xml' % site_code))
nwis.hdf5.update_site_data(site_code, period='all', path=test_file_path,
input_file=site_data_file, autorepack=False)
nwis.hdf5.remove_values(site_code, values_to_remove, path=test_file_path,
autorepack=False)
test_value = dict(datetime="2012-12-10T00:00:00", last_checked=None, last_modified=None, qualifiers="P", value=None)
site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
site_values = site_data['00010:00002']['values']
assert test_value in site_values
def test_site_data_is_sorted(test_file_path):
site_code = '01117800'
site_data_file = test_util.get_test_file_path(os.path.join('usgs','nwis', 'site_%s_daily.xml' % site_code))
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=site_data_file, autorepack=False)
site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
values = site_data['00060:00003']['values']
assert all(
values[i]['datetime'] < values[i+1]['datetime']
for i in range(len(values) - 1))
def test_update_site_data_basic_data_parsing(test_file_path):
site_code = '01117800'
site_data_file = test_util.get_test_file_path(os.path.join('usgs','nwis', 'site_%s_daily.xml' % site_code))
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=site_data_file, autorepack=False)
site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
test_values = [
dict(datetime="1964-01-23T00:00:00", last_checked=None, last_modified=None, qualifiers="A", value='240'),
dict(datetime="1964-08-22T00:00:00", last_checked=None, last_modified=None, qualifiers="A", value='7.9'),
dict(datetime="2011-12-15T00:00:00", last_checked=None, last_modified=None, qualifiers="P Eqp", value='-999999'),
dict(datetime="2012-01-15T00:00:00", last_checked=None, last_modified=None, qualifiers="P e", value='97'),
dict(datetime="2012-06-05T00:00:00", last_checked=None, last_modified=None, qualifiers="P", value='74'),
]
site_values = site_data['00060:00003']['values']
for test_value in test_values:
assert test_value in site_values
def test_site_data_filter_by_one_parameter_code(test_file_path):
site_code = '08068500'
parameter_code = '00065:00003'
site_data_file = test_util.get_test_file_path(
'usgs/nwis/site_%s_daily.xml' % site_code)
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=site_data_file, autorepack=False)
all_site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
site_data = nwis.hdf5.get_site_data(site_code, parameter_code=parameter_code, path=test_file_path)
assert site_data[parameter_code] == all_site_data[parameter_code]
def test_site_data_filter_by_multiple_parameter_codes(test_file_path):
site_code = '08068500'
parameter_code = ['00060:00003', '00065:00003', 'nonexistent']
site_data_file = test_util.get_test_file_path(
'usgs/nwis/site_%s_daily.xml' % site_code)
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=site_data_file, autorepack=False)
all_site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
site_data = nwis.hdf5.get_site_data(site_code, parameter_code=parameter_code, path=test_file_path)
for code in parameter_code:
if code in list(site_data.keys()):
assert site_data[code] == all_site_data[code]
def test_site_data_update_site_list_with_multiple_updates(test_file_path):
first_timestamp = '2013-01-01T01:01:01'
second_timestamp = '2013-02-02T02:02:02'
site_code = '01117800'
site_data_file = test_util.get_test_file_path(
'usgs/nwis/site_%s_daily.xml' % site_code)
with test_util.mocked_urls(site_data_file):
with freezegun.freeze_time(first_timestamp):
nwis.hdf5.update_site_data(site_code, path=test_file_path,
autorepack=False)
site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
last_value = site_data['00060:00003']['values'][-1]
assert first_timestamp == last_value['last_checked'] == last_value['last_modified']
update_data_file = test_util.get_test_file_path(os.path.join(
'usgs', 'nwis', 'site_%s_daily_update.xml' % site_code))
with test_util.mocked_urls(update_data_file):
with freezegun.freeze_time(second_timestamp):
nwis.hdf5.update_site_data(site_code, path=test_file_path,
autorepack=False)
updated_site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
updated_values = updated_site_data['00060:00003']['values']
last_value = updated_values[-1]
assert last_value['last_checked'] != first_timestamp
assert second_timestamp == last_value['last_checked'] == last_value['last_modified']
original_timestamp = first_timestamp
modified_timestamp = second_timestamp
test_values = [
dict(datetime="1963-01-23T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="A", value='7'),
dict(datetime="1964-01-23T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="A", value='1017'),
dict(datetime="1964-01-24T00:00:00", last_checked=original_timestamp, last_modified=original_timestamp, qualifiers="A", value='191'),
dict(datetime="1964-08-22T00:00:00", last_checked=original_timestamp, last_modified=original_timestamp, qualifiers="A", value='7.9'),
dict(datetime="1969-05-26T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="A", value='1080'),
dict(datetime="2011-12-06T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='222'),
dict(datetime="2011-12-15T00:00:00", last_checked=original_timestamp, last_modified=original_timestamp, qualifiers="P Eqp", value='-999999'),
dict(datetime="2012-01-15T00:00:00", last_checked=original_timestamp, last_modified=original_timestamp, qualifiers="P e", value='97'),
dict(datetime="2012-05-25T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='56'),
dict(datetime="2012-05-26T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='55'),
dict(datetime="2012-05-27T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="A", value='52'),
dict(datetime="2012-05-28T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='48'),
dict(datetime="2012-05-29T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='1099'),
dict(datetime="2012-05-30T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='1098'),
dict(datetime="2012-05-31T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='41'),
dict(datetime="2012-06-01T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='37'),
dict(datetime="2012-06-02T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='1097'),
dict(datetime="2012-06-03T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='69'),
dict(datetime="2012-06-04T00:00:00", last_checked=modified_timestamp, last_modified=original_timestamp, qualifiers="P", value='81'),
dict(datetime="2012-06-05T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='1071'),
dict(datetime="2012-06-06T00:00:00", last_checked=modified_timestamp, last_modified=modified_timestamp, qualifiers="P", value='2071'),
]
for test_value in test_values:
assert updated_values.index(test_value) >= 0
def test_last_refresh_gets_updated(test_file_path):
test_file_path = os.path.join(test_file_path, "test.h5")
first_timestamp = '2013-01-01T01:01:01'
second_timestamp = '2013-02-02T02:02:02'
    fourth_timestamp = '2013-03-03T03:03:03'
site_code = '01117800'
site_data_file = test_util.get_test_file_path(
'usgs/nwis/site_%s_daily.xml' % site_code)
with test_util.mocked_urls(site_data_file):
with freezegun.freeze_time(first_timestamp):
nwis.hdf5.update_site_data(site_code, path=test_file_path,
autorepack=False)
first_refresh = nwis.hdf5._get_last_refresh(site_code, test_file_path)
assert first_refresh == first_timestamp
with freezegun.freeze_time(second_timestamp):
nwis.hdf5.update_site_data(site_code, path=test_file_path,
autorepack=False)
second_refresh = nwis.hdf5._get_last_refresh(site_code, test_file_path)
assert second_refresh == second_timestamp
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=site_data_file, autorepack=False)
third_refresh = nwis.hdf5._get_last_refresh(site_code, test_file_path)
    assert third_refresh is None
    with freezegun.freeze_time(fourth_timestamp):
nwis.hdf5.update_site_data(site_code, path=test_file_path,
autorepack=False)
    fourth_refresh = nwis.hdf5._get_last_refresh(site_code, test_file_path)
    assert fourth_refresh is not None
    assert fourth_refresh == fourth_timestamp
def test_update_site_data_updates_site_list(test_file_path):
site_code = '01117800'
site_data_file = test_util.get_test_file_path(os.path.join(
'usgs', 'nwis', 'site_%s_daily_update.xml' % site_code))
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=site_data_file, autorepack=False)
site = nwis.hdf5.get_site(site_code, path=test_file_path)
test_site = {
'agency': 'USGS',
'code': '01117800',
'county': '44009',
'huc': '01090005',
'location': {
'latitude': '41.5739884',
'longitude': '-71.72062318',
'srs': 'EPSG:4326'
},
'name': 'WOOD RIVER NEAR ARCADIA, RI',
'network': 'NWIS',
'site_type': 'ST',
'state_code': '44',
'timezone_info': {
'default_tz': {'abbreviation': 'EST', 'offset': '-05:00'},
'dst_tz': {'abbreviation': 'EDT', 'offset': '-04:00'},
'uses_dst': True
}
}
assert site == test_site
def test_handles_empty_updates(test_file_path):
site_code = '01117800'
site_data_file = test_util.get_test_file_path(os.path.join(
'usgs', 'nwis', 'site_%s_daily.xml' % site_code))
empty_site_data_file = test_util.get_test_file_path(os.path.join(
'usgs', 'nwis', 'site_%s_daily_empty.xml' % site_code))
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=empty_site_data_file, autorepack=False)
empty_site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
assert empty_site_data['00060:00003']['values'] == []
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=site_data_file, autorepack=False)
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=empty_site_data_file, autorepack=False)
site_data = nwis.hdf5.get_site_data(site_code, path=test_file_path)
values = site_data['00060:00003']['values']
test_values = [
dict(datetime="1964-01-23T00:00:00", last_checked=None, last_modified=None, qualifiers="A", value='240'),
]
for test_value in test_values:
assert values.index(test_value) >= 0
def test_file_size_doesnt_balloon_with_update_site_data(test_file_path):
test_file_path = os.path.join(test_file_path, "test.h5")
site_code = '01117800'
site_data_file = test_util.get_test_file_path(os.path.join(
'usgs', 'nwis', 'site_%s_daily.xml' % site_code))
update_site_data_file = test_util.get_test_file_path(os.path.join(
'usgs', 'nwis', 'site_%s_daily_update.xml' % site_code))
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=site_data_file)
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=update_site_data_file)
original_size = os.path.getsize(test_file_path)
for i in range(20):
nwis.hdf5.update_site_data(site_code, path=test_file_path,
input_file=update_site_data_file)
expected_size = original_size * 1.01
assert os.path.getsize(test_file_path) <= expected_size
def test_file_size_doesnt_balloon_with_update_site_list(test_file_path):
test_file_path = os.path.join(test_file_path, "test.h5")
site_list_file = test_util.get_test_file_path(os.path.join('usgs', 'nwis', 'RI_daily.xml'))
updated_site_list_file = test_util.get_test_file_path(os.path.join('usgs', 'nwis', 'RI_daily.xml'))
nwis.hdf5.update_site_list(path=test_file_path,
input_file=site_list_file)
nwis.hdf5.update_site_list(path=test_file_path,
input_file=updated_site_list_file)
original_size = os.path.getsize(test_file_path)
for i in range(3):
nwis.hdf5.update_site_list(path=test_file_path,
input_file=updated_site_list_file)
expected_size = original_size * 1.01
assert os.path.getsize(test_file_path) <= expected_size
| bsd-3-clause |
abhisg/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
pthaike/keras | examples/kaggle_otto_nn.py | 70 | 3775 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4. Requires Scikit-Learn and Pandas.
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
    - with smaller layers, larger layers
    - with more layers, fewer layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
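# Sketch of the expected label flow (label format assumed from the Otto
# challenge, e.g. 'Class_1'..'Class_9'): LabelEncoder maps the strings to
# integers 0..8, and np_utils.to_categorical turns those integers into
# one-hot rows of length 9, matching the softmax output layer below.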
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
make_submission(proba, ids, encoder, fname='keras-otto.csv')
| mit |
murali-munna/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 130 | 6059 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
# Test NNDSVD behaviour on negative input
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
# Test that NNDSVD does not return negative values
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
        W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
# the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
# Test model fit behaviour on negative input
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
# Test that the fit is not too far away
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
# Test that NMF.transform returns close values
# (transform uses scipy.optimize.nnls for now)
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF(random_state=42)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
| bsd-3-clause |
feranick/Pi-bot | Old/3_ML-splrcbxyz/piRC_ML.py | 1 | 10653 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
**********************************************************
*
* PiRC - Machine learning train and predict
* version: 20170518b
*
* By: Nicola Ferralis <[email protected]>
*
***********************************************************
'''
print(__doc__)
import numpy as np
import sys, os.path, os, getopt, glob, csv
from time import sleep, time
from os.path import exists, splitext
from os import rename
from datetime import datetime, date
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.externals import joblib
from sklearn.preprocessing import StandardScaler
#**********************************************
''' MultiClassReductor '''
#**********************************************
class MultiClassReductor():
    def __init__(self, name='MultiClassReductor'):
        self.name = name
totalClass = [[-1,-1],[-1,0],[-1,1],[0,-1],[0,0],[0,1],[1,-1],[1,0],[1,1]]
def transform(self,y):
Cl = np.zeros(y.shape[0])
for j in range(len(y)):
Cl[j] = self.totalClass.index(np.array(y[j]).tolist())
return Cl
def inverse_transform(self,a):
return self.totalClass[int(a)]
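    # Illustrative sketch (assumed values): how the reductor maps a
    # (steer, power) pair to a single class index and back.
    #
    #   mlp = MultiClassReductor()
    #   mlp.transform(np.array([[1, 0]]))  # -> array([7.]), index of [1, 0] in totalClass
    #   mlp.inverse_transform(7)           # -> [1, 0]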
#**********************************************
''' General parameters'''
#**********************************************
class params:
timeDelay = 0.25
filename = 'Training_splrcbxyz.txt'
runFullAuto = False
debug = False # do not activate sensors or motors in debug mode
#**********************************************
''' Neural Networks'''
#**********************************************
class nnDef:
runNN = True
nnAlwaysRetrain = False
syncTimeLimit = 20 # time in seconds for NN model synchronization
syncTrainModel = False
saveNewTrainingData = False
useRegressor = False
scaler = StandardScaler()
mlp = MultiClassReductor()
''' Solver for NN
lbfgs preferred for small datasets
(alternatives: 'adam' or 'sgd') '''
nnSolver = 'lbfgs'
nnNeurons = 10 #default = 10
#**********************************************
''' Main '''
#**********************************************
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "rtch:", ["run", "train", "collect", "help"])
except:
usage()
sys.exit(2)
if opts == []:
usage()
sys.exit(2)
try:
sys.argv[3]
if sys.argv[3] in ("-C", "--Classifier"):
nnDef.useRegressor = False
elif sys.argv[3] in ("-R", "--Regressor"):
nnDef.useRegressor = True
except:
nnDef.useRegressor = False
for o, a in opts:
if o in ("-r" , "--run"):
try:
runAuto(sys.argv[2],params.runFullAuto)
except:
exitProg()
if o in ("-t" , "--train"):
try:
runTrain(sys.argv[2])
except:
sys.exit(2)
if o in ("-c" , "--collect"):
try:
writeTrainFile()
except:
exitProg()
#*************************************************
''' runAuto '''
''' Use ML models to predict steer and power '''
#*************************************************
def runAuto(trainFile, type):
trainFileRoot = os.path.splitext(trainFile)[0]
Cl, sensors = readTrainFile(trainFile)
clf = runNN(sensors, Cl, trainFileRoot)
fullStop(False)
syncTime = time()
while True:
if time() - syncTime > nnDef.syncTimeLimit and nnDef.syncTrainModel == True:
print(" Reloading NN model...")
clf = runNN(sensors, Cl, trainFileRoot)
print(" Synchronizing NN model...\n")
os.system("./syncTFile.sh " + trainFileRoot + " &")
syncTime = time()
if type == False:
print(" Running \033[1mPartial Auto\033[0m Mode\n")
s, p = predictDrive(clf)
drive(s,p)
sleep(params.timeDelay)
else:
print(" Running \033[1mFull Auto\033[0m Mode\n")
dt=0
t1=time()
while dt < 0.5:
s, p = predictDrive(clf)
if p != 0:
dt = 0
drive(s,p)
else:
dt = time() - t1
sleep(params.timeDelay)
drive(0, 1)
sleep(0.5)
drive(0, 0)
#*************************************************
''' runTrain '''
''' Use ML models to predict steer and power '''
#*************************************************
def runTrain(trainFile):
trainFileRoot = os.path.splitext(trainFile)[0]
Cl, sensors = readTrainFile(trainFile)
nnDef.nnAlwaysRetrain = True
runNN(sensors, Cl, trainFileRoot)
#*************************************************
''' write training file from sensors '''
#*************************************************
def writeTrainFile():
while True:
import piRC_lib
s,p,l,r,c,b,x,y,z = piRC_lib.readAllSensors()
print(' S={0:.0f}, P={1:.0f}, L={2:.0f}, R={3:.0f}, C={4:.0f}, B={5:.0f}, X={6:.3f}, Y={7:.3f}, Z={8:.3f}'.format(s,p,l,r,c,b,x,y,z))
with open(params.filename, "a") as sum_file:
sum_file.write('{0:.0f}\t{1:.0f}\t{2:.0f}\t{3:.0f}\t{4:.0f}\t{5:.0f}\t{6:.3f}\t{7:.3f}\t{8:.3f}\n'.format(s,p,l,r,c,b,x,y,z))
#*************************************************
''' read Train File '''
#*************************************************
def readTrainFile(trainFile):
try:
with open(trainFile, 'r') as f:
M = np.loadtxt(f, unpack =False)
except:
print('\033[1m' + ' Training file not found \n' + '\033[0m')
return
steer = M[:,0]
power = M[:,1]
Cl = M[:,[0,1]]
sensors = np.delete(M,np.s_[0:2],1)
return Cl, sensors
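# Layout sketch of the training file (as written by writeTrainFile): each row
# is "S P L R C B X Y Z", so columns 0-1 (steer, power) become the labels Cl
# and the remaining seven columns are the sensor readings.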
#********************************************************************************
''' Run Neural Network '''
#********************************************************************************
def runNN(sensors, Cl, Root):
if nnDef.useRegressor is False:
nnTrainedData = Root + '.nnModelC.pkl'
else:
nnTrainedData = Root + '.nnModelR.pkl'
print(' Running Neural Network: multi-layer perceptron (MLP) - (solver: ' + nnDef.nnSolver + ')...')
sensors = nnDef.scaler.fit_transform(sensors)
if nnDef.useRegressor is False:
Y = nnDef.mlp.transform(Cl)
else:
Y = Cl
try:
if nnDef.nnAlwaysRetrain == False:
with open(nnTrainedData):
print(' Opening NN training model...\n')
clf = joblib.load(nnTrainedData)
else:
raise ValueError('Force NN retraining.')
except:
#**********************************************
''' Retrain data if not available'''
#**********************************************
print(' Retraining NN model...\n')
if nnDef.useRegressor is False:
clf = MLPClassifier(solver=nnDef.nnSolver, alpha=1e-5, hidden_layer_sizes=(nnDef.nnNeurons,), random_state=1)
else:
clf = MLPRegressor(solver=nnDef.nnSolver, alpha=1e-5, hidden_layer_sizes=(nnDef.nnNeurons,), random_state=9)
clf.fit(sensors, Y)
joblib.dump(clf, nnTrainedData)
return clf
#*************************************************
''' Predict drive pattern '''
#*************************************************
def predictDrive(clf):
np.set_printoptions(suppress=True)
sp = [0,0]
if params.debug is True:
s,p,l,r,c,b,x,y,z = [-1,-1,116,117,111,158,0.224,0.108,1.004]
else:
import piRC_lib
s,p,l,r,c,b,x,y,z = piRC_lib.readAllSensors()
print(' S={0:.0f}, P={1:.0f}, L={2:.0f}, R={3:.0f}, C={4:.0f}, B={5:.0f}, X={6:.3f}, Y={7:.3f}, Z={8:.3f}'.format(s,p,l,r,c,b,x,y,z))
nowsensors = np.array([[round(l,0),round(r,0),round(c,0),round(b,0),round(x,3),round(y,3),round(z,3)]]).reshape(1,-1)
if nnDef.useRegressor is False:
nowsensors = nnDef.scaler.transform(nowsensors)
try:
sp[0] = nnDef.mlp.inverse_transform(clf.predict(nowsensors)[0])[0]
sp[1] = nnDef.mlp.inverse_transform(clf.predict(nowsensors)[0])[1]
except:
sp = [0,0]
print('\033[1m' + '\n Predicted classification value (Neural Networks) = ( S=',str(sp[0]),', P=',str(sp[1]),')')
prob = clf.predict_proba(nowsensors)[0].tolist()
print(' (probability = ' + str(round(100*max(prob),4)) + '%)\033[0m\n')
else:
sp = clf.predict(nowsensors)[0]
print('\033[1m' + '\n Predicted regression value (Neural Networks) = ( S=',str(sp[0]),', P=',str(sp[1]),')')
for k in range(2):
if sp[k] >= 1:
sp[k] = 1
elif sp[k] <= -1:
sp[k] = -1
else:
sp[k] = 0
print('\033[1m' + ' Predicted regression value (Neural Networks) = ( S=',str(sp[0]),', P=',str(sp[1]),') Normalized\n')
if nnDef.saveNewTrainingData is True:
with open(params.filename, "a") as sum_file:
sum_file.write('{0:.0f}\t{1:.0f}\t{2:.0f}\t{3:.0f}\t{4:.0f}\t{5:.0f}\t{6:.3f}\t{7:.3f}\t{8:.3f}\n'.format(sp[0],sp[1],l,r,c,b,x,y,z))
return sp[0], sp[1]
#*************************************************
''' Drive '''
#*************************************************
def drive(s,p):
if params.debug is False:
import piRC_lib
piRC_lib.runMotor(0,s)
piRC_lib.runMotor(1,p)
def fullStop(type):
if params.debug is False:
import piRC_lib
piRC_lib.fullStop(type)
#*************************************************
''' Lists the program usage '''
#*************************************************
def usage():
print('\n Usage:')
print('\n Training (Classifier):\n python3 piRC_ML.py -t <train file>')
print('\n Prediction (Classifier):\n python3 piRC_ML.py -r <train file>')
print('\n Training (Regression):\n python3 piRC_ML.py -t <train file> -R')
print('\n Prediction (Regression):\n python3 piRC_ML.py -r <train file> -R')
print('\n Collect data from sensors into training file:\n python3 piRC_ML.py -c')
    print('\n (Separate trained models are created for regression and classification)\n')
print(' Requires python 3.x. Not compatible with python 2.x\n')
def exitProg():
fullStop(True)
sys.exit(2)
#*************************************************
''' Main initialization routine '''
#*************************************************
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
kalvdans/scipy | scipy/stats/_multivariate.py | 12 | 112182 | #
# Author: Joris Vankerschaver 2013
#
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import scipy.linalg
from scipy.misc import doccer
from scipy.special import gammaln, psi, multigammaln, xlogy, entr
from scipy._lib._util import check_random_state
from scipy.linalg.blas import drot
from ._discrete_distns import binom
__all__ = ['multivariate_normal',
'matrix_normal',
'dirichlet',
'wishart',
'invwishart',
'multinomial',
'special_ortho_group',
'ortho_group',
'random_correlation']
_LOG_2PI = np.log(2 * np.pi)
_LOG_2 = np.log(2)
_LOG_PI = np.log(np.pi)
_doc_random_state = """\
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
"""
def _squeeze_output(out):
"""
Remove single-dimensional entries from array and convert to scalar,
if necessary.
"""
out = out.squeeze()
if out.ndim == 0:
out = out[()]
return out
def _eigvalsh_to_eps(spectrum, cond=None, rcond=None):
"""
Determine which eigenvalues are "small" given the spectrum.
This is for compatibility across various linear algebra functions
that should agree about whether or not a Hermitian matrix is numerically
singular and what is its numerical matrix rank.
This is designed to be compatible with scipy.linalg.pinvh.
Parameters
----------
spectrum : 1d ndarray
Array of eigenvalues of a Hermitian matrix.
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
Returns
-------
eps : float
Magnitude cutoff for numerical negligibility.
"""
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = spectrum.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
eps = cond * np.max(abs(spectrum))
return eps
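# Worked example (assumed values): for a float64 spectrum [1e-20, 3.0] the
# default cond is 1e6 * machine eps, so eps ~= 1e6 * 2.22e-16 * 3.0 ~= 6.7e-10
# and the 1e-20 eigenvalue is treated as numerically zero.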
def _pinv_1d(v, eps=1e-5):
"""
A helper function for computing the pseudoinverse.
Parameters
----------
v : iterable of numbers
This may be thought of as a vector of eigenvalues or singular values.
eps : float
Values with magnitude no greater than eps are considered negligible.
Returns
-------
v_pinv : 1d float ndarray
A vector of pseudo-inverted numbers.
"""
return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float)
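# Example (illustrative): _pinv_1d(np.array([0.0, 1e-7, 2.0]), eps=1e-5)
# returns array([0. , 0. , 0.5]) -- negligible entries are zeroed rather than
# inverted, which keeps the pseudo-inverse well defined.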
class _PSD(object):
"""
Compute coordinated functions of a symmetric positive semidefinite matrix.
This class addresses two issues. Firstly it allows the pseudoinverse,
the logarithm of the pseudo-determinant, and the rank of the matrix
to be computed using one call to eigh instead of three.
Secondly it allows these functions to be computed in a way
that gives mutually compatible results.
All of the functions are computed with a common understanding as to
which of the eigenvalues are to be considered negligibly small.
The functions are designed to coordinate with scipy.linalg.pinvh()
but not necessarily with np.linalg.det() or with np.linalg.matrix_rank().
Parameters
----------
M : array_like
Symmetric positive semidefinite matrix (2-D).
cond, rcond : float, optional
Cutoff for small eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are
considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower
or upper triangle of M. (Default: lower)
check_finite : bool, optional
Whether to check that the input matrices contain only finite
numbers. Disabling may give a performance gain, but may result
in problems (crashes, non-termination) if the inputs do contain
infinities or NaNs.
allow_singular : bool, optional
Whether to allow a singular matrix. (Default: True)
Notes
-----
The arguments are similar to those of scipy.linalg.pinvh().
"""
def __init__(self, M, cond=None, rcond=None, lower=True,
check_finite=True, allow_singular=True):
# Compute the symmetric eigendecomposition.
# Note that eigh takes care of array conversion, chkfinite,
# and assertion that the matrix is square.
s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite)
eps = _eigvalsh_to_eps(s, cond, rcond)
if np.min(s) < -eps:
raise ValueError('the input matrix must be positive semidefinite')
d = s[s > eps]
if len(d) < len(s) and not allow_singular:
raise np.linalg.LinAlgError('singular matrix')
s_pinv = _pinv_1d(s, eps)
U = np.multiply(u, np.sqrt(s_pinv))
# Initialize the eagerly precomputed attributes.
self.rank = len(d)
self.U = U
self.log_pdet = np.sum(np.log(d))
# Initialize an attribute to be lazily computed.
self._pinv = None
@property
def pinv(self):
if self._pinv is None:
self._pinv = np.dot(self.U, self.U.T)
return self._pinv
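# Minimal usage sketch for _PSD (values assumed):
#   psd = _PSD(np.diag([2.0, 0.0]))  # rank-deficient, allowed by default
#   psd.rank                         # -> 1
#   psd.log_pdet                     # -> log(2.0), the pseudo log-determinant
#   psd.pinv                         # -> diag([0.5, 0.0]), the pseudo-inverse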
class multi_rv_generic(object):
"""
Class which encapsulates common functionality between all multivariate
distributions.
"""
def __init__(self, seed=None):
super(multi_rv_generic, self).__init__()
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _get_random_state(self, random_state):
if random_state is not None:
return check_random_state(random_state)
else:
return self._random_state
class multi_rv_frozen(object):
"""
Class which encapsulates common functionality between all frozen
multivariate distributions.
"""
@property
def random_state(self):
return self._dist._random_state
@random_state.setter
def random_state(self, seed):
self._dist._random_state = check_random_state(seed)
_mvn_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
Whether to allow a singular covariance matrix. (Default: False)
"""
_mvn_doc_callparams_note = \
"""Setting the parameter `mean` to `None` is equivalent to having `mean`
be the zero-vector. The parameter `cov` can be a scalar, in which case
the covariance matrix is the identity times that value, a vector of
diagonal entries for the covariance matrix, or a two-dimensional
array_like.
"""
_mvn_doc_frozen_callparams = ""
_mvn_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
mvn_docdict_params = {
'_mvn_doc_default_callparams': _mvn_doc_default_callparams,
'_mvn_doc_callparams_note': _mvn_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
mvn_docdict_noparams = {
'_mvn_doc_default_callparams': _mvn_doc_frozen_callparams,
'_mvn_doc_callparams_note': _mvn_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multivariate_normal_gen(multi_rv_generic):
r"""
A multivariate normal random variable.
The `mean` keyword specifies the mean. The `cov` keyword specifies the
covariance matrix.
Methods
-------
``pdf(x, mean=None, cov=1, allow_singular=False)``
Probability density function.
``logpdf(x, mean=None, cov=1, allow_singular=False)``
Log of the probability density function.
``rvs(mean=None, cov=1, size=1, random_state=None)``
Draw random samples from a multivariate normal distribution.
``entropy()``
Compute the differential entropy of the multivariate normal.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" multivariate normal
random variable:
rv = multivariate_normal(mean=None, cov=1, allow_singular=False)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_mvn_doc_callparams_note)s
The covariance matrix `cov` must be a (symmetric) positive
semi-definite matrix. The determinant and inverse of `cov` are computed
as the pseudo-determinant and pseudo-inverse, respectively, so
that `cov` does not need to have full rank.
The probability density function for `multivariate_normal` is
.. math::
f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}}
\exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right),
where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix,
and :math:`k` is the dimension of the space where :math:`x` takes values.
.. versionadded:: 0.14.0
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import multivariate_normal
>>> x = np.linspace(0, 5, 10, endpoint=False)
>>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129,
0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349])
>>> fig1 = plt.figure()
>>> ax = fig1.add_subplot(111)
>>> ax.plot(x, y)
The input quantiles can be any shape of array, as long as the last
axis labels the components. This allows us for instance to
display the frozen pdf for a non-isotropic random variable in 2D as
follows:
>>> x, y = np.mgrid[-1:1:.01, -1:1:.01]
>>> pos = np.dstack((x, y))
>>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
>>> fig2 = plt.figure()
>>> ax2 = fig2.add_subplot(111)
>>> ax2.contourf(x, y, rv.pdf(pos))
"""
def __init__(self, seed=None):
super(multivariate_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, mvn_docdict_params)
def __call__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
See `multivariate_normal_frozen` for more information.
"""
return multivariate_normal_frozen(mean, cov,
allow_singular=allow_singular,
seed=seed)
def _process_parameters(self, dim, mean, cov):
"""
Infer dimensionality from mean or covariance matrix, ensure that
mean and covariance are full vector resp. matrix.
"""
# Try to infer dimensionality
if dim is None:
if mean is None:
if cov is None:
dim = 1
else:
cov = np.asarray(cov, dtype=float)
if cov.ndim < 2:
dim = 1
else:
dim = cov.shape[0]
else:
mean = np.asarray(mean, dtype=float)
dim = mean.size
else:
if not np.isscalar(dim):
raise ValueError("Dimension of random variable must be a scalar.")
# Check input sizes and return full arrays for mean and cov if necessary
if mean is None:
mean = np.zeros(dim)
mean = np.asarray(mean, dtype=float)
if cov is None:
cov = 1.0
cov = np.asarray(cov, dtype=float)
if dim == 1:
mean.shape = (1,)
cov.shape = (1, 1)
if mean.ndim != 1 or mean.shape[0] != dim:
raise ValueError("Array 'mean' must be a vector of length %d." % dim)
if cov.ndim == 0:
cov = cov * np.eye(dim)
elif cov.ndim == 1:
cov = np.diag(cov)
elif cov.ndim == 2 and cov.shape != (dim, dim):
rows, cols = cov.shape
if rows != cols:
msg = ("Array 'cov' must be square if it is two dimensional,"
" but cov.shape = %s." % str(cov.shape))
else:
msg = ("Dimension mismatch: array 'cov' is of shape %s,"
" but 'mean' is a vector of length %d.")
msg = msg % (str(cov.shape), len(mean))
raise ValueError(msg)
elif cov.ndim > 2:
raise ValueError("Array 'cov' must be at most two-dimensional,"
" but cov.ndim = %d" % cov.ndim)
return dim, mean, cov
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x[np.newaxis]
elif x.ndim == 1:
if dim == 1:
x = x[:, np.newaxis]
else:
x = x[np.newaxis, :]
return x
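    # Shape handling (illustrative): a scalar x becomes shape (1,); a 1-D x is
    # treated as a single dim-dimensional point of shape (1, dim), unless
    # dim == 1, in which case it is treated as n points of shape (n, 1).
    # The last axis always labels the components.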
def _logpdf(self, x, mean, prec_U, log_det_cov, rank):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
prec_U : ndarray
A decomposition such that np.dot(prec_U, prec_U.T)
is the precision matrix, i.e. inverse of the covariance matrix.
log_det_cov : float
Logarithm of the determinant of the covariance matrix
rank : int
Rank of the covariance matrix.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
dev = x - mean
maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1)
return -0.5 * (rank * _LOG_2PI + log_det_cov + maha)
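    # Sanity check (assumed 1-D case): with mean 0, prec_U = [[1.0]],
    # log_det_cov = 0 and rank = 1, the expression reduces to
    # -0.5*(log(2*pi) + x**2), the standard normal log-density.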
def logpdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Log of the multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)
return _squeeze_output(out)
def pdf(self, x, mean=None, cov=1, allow_singular=False):
"""
Multivariate normal probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_mvn_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
x = self._process_quantiles(x, dim)
psd = _PSD(cov, allow_singular=allow_singular)
out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank))
return _squeeze_output(out)
def rvs(self, mean=None, cov=1, size=1, random_state=None):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
%(_mvn_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
random_state = self._get_random_state(random_state)
out = random_state.multivariate_normal(mean, cov, size)
return _squeeze_output(out)
def entropy(self, mean=None, cov=1):
"""
Compute the differential entropy of the multivariate normal.
Parameters
----------
%(_mvn_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
Notes
-----
%(_mvn_doc_callparams_note)s
"""
dim, mean, cov = self._process_parameters(None, mean, cov)
_, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov)
return 0.5 * logdet
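    # Quick check (illustrative): for a 1-D unit-variance normal this gives
    # 0.5*log(2*pi*e) ~= 1.4189, the usual differential entropy value.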
multivariate_normal = multivariate_normal_gen()
class multivariate_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, cov=1, allow_singular=False, seed=None):
"""
Create a frozen multivariate normal distribution.
Parameters
----------
mean : array_like, optional
Mean of the distribution (default zero)
cov : array_like, optional
Covariance matrix of the distribution (default one)
allow_singular : bool, optional
If this flag is True then tolerate a singular
covariance matrix (default False).
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
When called with the default parameters, this will create a 1D random
variable with mean 0 and covariance 1:
>>> from scipy.stats import multivariate_normal
>>> r = multivariate_normal()
>>> r.mean
array([ 0.])
>>> r.cov
array([[1.]])
"""
self._dist = multivariate_normal_gen(seed)
self.dim, self.mean, self.cov = self._dist._process_parameters(
None, mean, cov)
self.cov_info = _PSD(self.cov, allow_singular=allow_singular)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.mean, self.cov_info.U,
self.cov_info.log_pdet, self.cov_info.rank)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.cov, size, random_state)
def entropy(self):
"""
Computes the differential entropy of the multivariate normal.
Returns
-------
h : scalar
Entropy of the multivariate normal distribution
"""
log_pdet = self.cov_info.log_pdet
rank = self.cov_info.rank
return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = multivariate_normal_gen.__dict__[name]
method_frozen = multivariate_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, mvn_docdict_params)
_matnorm_doc_default_callparams = """\
mean : array_like, optional
Mean of the distribution (default: `None`)
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: `1`)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: `1`)
"""
_matnorm_doc_callparams_note = \
"""If `mean` is set to `None` then a matrix of zeros is used for the mean.
The dimensions of this matrix are inferred from the shape of `rowcov` and
`colcov`, if these are provided, or set to `1` if ambiguous.
`rowcov` and `colcov` can be two-dimensional array_likes specifying the
covariance matrices directly. Alternatively, a one-dimensional array will
be be interpreted as the entries of a diagonal matrix, and a scalar or
zero-dimensional array will be interpreted as this value times the
identity matrix.
"""
_matnorm_doc_frozen_callparams = ""
_matnorm_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
matnorm_docdict_params = {
'_matnorm_doc_default_callparams': _matnorm_doc_default_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
matnorm_docdict_noparams = {
'_matnorm_doc_default_callparams': _matnorm_doc_frozen_callparams,
'_matnorm_doc_callparams_note': _matnorm_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class matrix_normal_gen(multi_rv_generic):
r"""
A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
among-row covariance matrix. The 'colcov' keyword specifies the
among-column covariance matrix.
Methods
-------
``pdf(X, mean=None, rowcov=1, colcov=1)``
Probability density function.
``logpdf(X, mean=None, rowcov=1, colcov=1)``
Log of the probability density function.
``rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)``
Draw random samples.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
rv = matrix_normal(mean=None, rowcov=1, colcov=1)
- Frozen object with the same methods but holding the given
mean and covariance fixed.
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
"""
def __init__(self, seed=None):
super(matrix_normal_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""
Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
if np.any(meanshape == 0):
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the"
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the"
"same number of columns.")
else:
mean = np.zeros((numrows,numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
Adjust quantiles array so that last two axes labels the components of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
roll_dev = np.rollaxis(X-mean, axis=-1, start=0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""
Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""
Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""
Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
std_norm = random_state.standard_normal(size=(dims[1],size,dims[0]))
roll_rvs = np.tensordot(colchol, np.dot(std_norm, rowchol.T), 1)
out = np.rollaxis(roll_rvs.T, axis=1, start=0) + mean[np.newaxis,:,:]
if size == 1:
#out = np.squeeze(out, axis=0)
out = out.reshape(mean.shape)
return out
matrix_normal = matrix_normal_gen()
class matrix_normal_frozen(multi_rv_frozen):
def __init__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""
Create a frozen matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
seed : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates.
If None (or np.random), the global np.random state is used.
Default is None.
Examples
--------
>>> from scipy.stats import matrix_normal
>>> distn = matrix_normal(mean=np.zeros((3,3)))
>>> X = distn.rvs(); X
array([[-0.02976962, 0.93339138, -0.09663178],
[ 0.67405524, 0.28250467, -0.93308929],
[-0.31144782, 0.74535536, 1.30412916]])
>>> distn.pdf(X)
2.5160642368346784e-05
>>> distn.logpdf(X)
-10.590229595124615
"""
self._dist = matrix_normal_gen(seed)
self.dims, self.mean, self.rowcov, self.colcov = \
self._dist._process_parameters(mean, rowcov, colcov)
self.rowpsd = _PSD(self.rowcov, allow_singular=False)
self.colpsd = _PSD(self.colcov, allow_singular=False)
def logpdf(self, X):
X = self._dist._process_quantiles(X, self.dims)
out = self._dist._logpdf(self.dims, X, self.mean, self.rowpsd.U,
self.rowpsd.log_pdet, self.colpsd.U,
self.colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X):
return np.exp(self.logpdf(X))
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.mean, self.rowcov, self.colcov, size,
random_state)
# Set frozen generator docstrings from corresponding docstrings in
# matrix_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs']:
method = matrix_normal_gen.__dict__[name]
method_frozen = matrix_normal_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, matnorm_docdict_params)
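# --- Illustrative sketch, not part of the original module ---
# The matrix_normal docstring above states that Vec(X) is multivariate normal
# with mean Vec(M) and covariance V kron U.  The helper below is a hypothetical
# self-check of that identity; it assumes numpy is available as `np` (as it is
# elsewhere in this file) and defers the multivariate_normal import to call time.
def _check_matrix_normal_vec_identity():
    from scipy.stats import multivariate_normal   # assumed available at call time
    M = np.arange(6, dtype=float).reshape(3, 2)   # 3 x 2 mean matrix
    U = np.diag([1.0, 2.0, 3.0])                  # among-row covariance
    V = 0.3 * np.identity(2)                      # among-column covariance
    X = M + 0.1
    lhs = matrix_normal.logpdf(X, mean=M, rowcov=U, colcov=V)
    # Vec(X) stacks the columns of X, which is X.T.ravel() for a C-ordered array.
    rhs = multivariate_normal.logpdf(X.T.ravel(), mean=M.T.ravel(),
                                     cov=np.kron(V, U))
    return np.allclose(lhs, rhs)                  # expected to be True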
_dirichlet_doc_default_callparams = """\
alpha : array_like
The concentration parameters. The number of entries determines the
dimensionality of the distribution.
"""
_dirichlet_doc_frozen_callparams = ""
_dirichlet_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
dirichlet_docdict_params = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams,
'_doc_random_state': _doc_random_state
}
dirichlet_docdict_noparams = {
'_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams,
'_doc_random_state': _doc_random_state
}
def _dirichlet_check_parameters(alpha):
alpha = np.asarray(alpha)
if np.min(alpha) <= 0:
raise ValueError("All parameters must be greater than 0")
elif alpha.ndim != 1:
raise ValueError("Parameter vector 'a' must be one dimensional, "
"but a.shape = %s." % (alpha.shape, ))
return alpha
def _dirichlet_check_input(alpha, x):
x = np.asarray(x)
if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]:
raise ValueError("Vector 'x' must have either the same number "
"of entries as, or one entry fewer than, "
"parameter vector 'a', but alpha.shape = %s "
"and x.shape = %s." % (alpha.shape, x.shape))
if x.shape[0] != alpha.shape[0]:
xk = np.array([1 - np.sum(x, 0)])
if xk.ndim == 1:
x = np.append(x, xk)
elif xk.ndim == 2:
x = np.vstack((x, xk))
else:
raise ValueError("The input must be one dimensional or a two "
"dimensional matrix containing the entries.")
if np.min(x) <= 0:
raise ValueError("Each entry in 'x' must be greater than zero.")
if np.max(x) > 1:
raise ValueError("Each entry in 'x' must be smaller or equal one.")
if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any():
raise ValueError("The input vector 'x' must lie within the normal "
"simplex. but np.sum(x, 0) = %s." % np.sum(x, 0))
return x
def _lnB(alpha):
r"""
Internal helper function to compute the log of the useful quotient
.. math::
B(\alpha) = \frac{\prod_{i=1}^{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)}
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
B : scalar
Helper quotient, internal use only
"""
return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha))
class dirichlet_gen(multi_rv_generic):
r"""
A Dirichlet random variable.
The `alpha` keyword specifies the concentration parameters of the
distribution.
.. versionadded:: 0.15.0
Methods
-------
``pdf(x, alpha)``
Probability density function.
``logpdf(x, alpha)``
Log of the probability density function.
``rvs(alpha, size=1, random_state=None)``
Draw random samples from a Dirichlet distribution.
``mean(alpha)``
The mean of the Dirichlet distribution
``var(alpha)``
The variance of the Dirichlet distribution
``entropy(alpha)``
Compute the differential entropy of the Dirichlet distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix
concentration parameters, returning a "frozen" Dirichlet
random variable:
rv = dirichlet(alpha)
- Frozen object with the same methods but holding the given
concentration parameters fixed.
Notes
-----
Each :math:`\alpha` entry must be positive. The distribution is supported
only on the simplex defined by
.. math::
\sum_{i=1}^{K} x_i \le 1
The probability density function for `dirichlet` is
.. math::
f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1}
where
.. math::
\mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)}
{\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)}
and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the
concentration parameters and :math:`K` is the dimension of the space
where :math:`x` takes values.
Note that the dirichlet interface is somewhat inconsistent.
The array returned by the rvs function is transposed
with respect to the format expected by the pdf and logpdf.
"""
def __init__(self, seed=None):
super(dirichlet_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params)
def __call__(self, alpha, seed=None):
return dirichlet_frozen(alpha, seed=seed)
def _logpdf(self, x, alpha):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
%(_dirichlet_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
lnB = _lnB(alpha)
return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0)
def logpdf(self, x, alpha):
"""
Log of the Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = self._logpdf(x, alpha)
return _squeeze_output(out)
def pdf(self, x, alpha):
"""
The Dirichlet probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_dirichlet_doc_default_callparams)s
Returns
-------
pdf : ndarray
The probability density function evaluated at `x`.
"""
alpha = _dirichlet_check_parameters(alpha)
x = _dirichlet_check_input(alpha, x)
out = np.exp(self._logpdf(x, alpha))
return _squeeze_output(out)
def mean(self, alpha):
"""
Compute the mean of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
mu : ndarray or scalar
Mean of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
out = alpha / (np.sum(alpha))
return _squeeze_output(out)
def var(self, alpha):
"""
Compute the variance of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
v : ndarray
Variance of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1))
return out
def entropy(self, alpha):
"""
Compute the differential entropy of the dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Dirichlet distribution
"""
alpha = _dirichlet_check_parameters(alpha)
alpha0 = np.sum(alpha)
lnB = _lnB(alpha)
K = alpha.shape[0]
out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
(alpha - 1) * scipy.special.psi(alpha))
return _squeeze_output(out)
def rvs(self, alpha, size=1, random_state=None):
"""
Draw random samples from a Dirichlet distribution.
Parameters
----------
%(_dirichlet_doc_default_callparams)s
size : int, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `N`), where `N` is the
dimension of the random variable.
"""
alpha = _dirichlet_check_parameters(alpha)
random_state = self._get_random_state(random_state)
return random_state.dirichlet(alpha, size=size)
dirichlet = dirichlet_gen()
class dirichlet_frozen(multi_rv_frozen):
def __init__(self, alpha, seed=None):
self.alpha = _dirichlet_check_parameters(alpha)
self._dist = dirichlet_gen(seed)
def logpdf(self, x):
return self._dist.logpdf(x, self.alpha)
def pdf(self, x):
return self._dist.pdf(x, self.alpha)
def mean(self):
return self._dist.mean(self.alpha)
def var(self):
return self._dist.var(self.alpha)
def entropy(self):
return self._dist.entropy(self.alpha)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.alpha, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multivariate_normal_gen and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']:
method = dirichlet_gen.__dict__[name]
method_frozen = dirichlet_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, dirichlet_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params)
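# --- Illustrative sketch, not part of the original module ---
# A hypothetical check that `dirichlet.logpdf` matches the closed form from the
# class docstring, log f(x) = -log B(alpha) + sum_i (alpha_i - 1) log x_i.
# numpy is assumed available as `np`; gammaln is imported locally from
# scipy.special at call time.
def _check_dirichlet_logpdf_closed_form():
    from scipy.special import gammaln as _gammaln  # local alias, assumption noted above
    alpha = np.array([1.5, 2.0, 3.5])
    x = np.array([0.2, 0.3, 0.5])                  # a point on the simplex
    log_B = np.sum(_gammaln(alpha)) - _gammaln(np.sum(alpha))
    closed_form = -log_B + np.sum((alpha - 1.0) * np.log(x))
    return np.allclose(dirichlet.logpdf(x, alpha), closed_form)  # expected True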
_wishart_doc_default_callparams = """\
df : int
Degrees of freedom, must be greater than or equal to the dimension of the
scale matrix
scale : array_like
Symmetric positive definite scale matrix of the distribution
"""
_wishart_doc_callparams_note = ""
_wishart_doc_frozen_callparams = ""
_wishart_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
wishart_docdict_params = {
'_doc_default_callparams': _wishart_doc_default_callparams,
'_doc_callparams_note': _wishart_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
wishart_docdict_noparams = {
'_doc_default_callparams': _wishart_doc_frozen_callparams,
'_doc_callparams_note': _wishart_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class wishart_gen(multi_rv_generic):
r"""
A Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal precision matrix (the inverse of the covariance
matrix).
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from a Wishart distribution.
``entropy()``
Compute the differential entropy of the Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" Wishart random
variable:
rv = wishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
invwishart, chi2
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The Wishart distribution is often denoted
.. math::
W_p(\nu, \Sigma)
where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the
:math:`p \times p` scale matrix.
The probability density function for `wishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then
its PDF is given by:
.. math::
f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} }
|\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )}
\exp\left( -tr(\Sigma^{-1} S) / 2 \right)
If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then
:math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart).
If the scale matrix is 1-dimensional and equal to one, then the Wishart
distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)`
distribution.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate
Generator", Applied Statistics, vol. 21, pp. 341-345, 1972.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import wishart, chi2
>>> x = np.linspace(1e-5, 8, 100)
>>> w = wishart.pdf(x, df=3, scale=1); w[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> c = chi2.pdf(x, 3); c[:5]
array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ])
>>> plt.plot(x, w)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(wishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen Wishart distribution.
See `wishart_frozen` for more information.
"""
return wishart_frozen(df, scale, seed)
def _process_parameters(self, df, scale):
if scale is None:
scale = 1.0
scale = np.asarray(scale, dtype=float)
if scale.ndim == 0:
scale = scale[np.newaxis,np.newaxis]
elif scale.ndim == 1:
scale = np.diag(scale)
elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]:
raise ValueError("Array 'scale' must be square if it is two"
" dimensional, but scale.scale = %s."
% str(scale.shape))
elif scale.ndim > 2:
raise ValueError("Array 'scale' must be at most two-dimensional,"
" but scale.ndim = %d" % scale.ndim)
dim = scale.shape[0]
if df is None:
df = dim
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df < dim:
raise ValueError("Degrees of freedom cannot be less than dimension"
" of scale matrix, but df = %d" % df)
return dim, df, scale
def _process_quantiles(self, x, dim):
"""
Adjust quantiles array so that last axis labels the components of
each data point.
"""
x = np.asarray(x, dtype=float)
if x.ndim == 0:
x = x * np.eye(dim)[:, :, np.newaxis]
if x.ndim == 1:
if dim == 1:
x = x[np.newaxis, np.newaxis, :]
else:
x = np.diag(x)[:, :, np.newaxis]
elif x.ndim == 2:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square if they are two"
" dimensional, but x.shape = %s."
% str(x.shape))
x = x[:, :, np.newaxis]
elif x.ndim == 3:
if not x.shape[0] == x.shape[1]:
raise ValueError("Quantiles must be square in the first two"
" dimensions if they are three dimensional"
", but x.shape = %s." % str(x.shape))
elif x.ndim > 3:
raise ValueError("Quantiles must be at most two-dimensional with"
" an additional dimension for multiple"
"components, but x.ndim = %d" % x.ndim)
# Now we have 3-dim array; should have shape [dim, dim, *]
if not x.shape[0:2] == (dim, dim):
raise ValueError('Quantiles have incompatible dimensions: should'
' be %s, got %s.' % ((dim, dim), x.shape[0:2]))
return x
def _process_size(self, size):
size = np.asarray(size)
if size.ndim == 0:
size = size[np.newaxis]
elif size.ndim > 1:
raise ValueError('Size must be an integer or tuple of integers;'
' thus must have dimension <= 1.'
' Got size.ndim = %s' % str(tuple(size)))
n = size.prod()
shape = tuple(size)
return n, shape
def _logpdf(self, x, dim, df, scale, log_det_scale, C):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
# log determinant of x
# Note: x has components along the last axis, so that x.T has
# components along the 0-th axis. Then since det(A) = det(A'), this
# gives us a 1-dim vector of determinants
# Retrieve tr(scale^{-1} x)
log_det_x = np.zeros(x.shape[-1])
scale_inv_x = np.zeros(x.shape)
tr_scale_inv_x = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
_, log_det_x[i] = self._cholesky_logdet(x[:,:,i])
scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i])
tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace()
# Log PDF
out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) -
(0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale +
multigammaln(0.5*df, dim)))
return out
def logpdf(self, x, df, scale):
"""
Log of the Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
# Cholesky decomposition of scale, get log(det(scale))
C, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale, C)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
return df * scale
def mean(self, df, scale):
"""
Mean of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out)
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
if df >= dim + 1:
out = (df-dim-1) * scale
else:
out = None
return out
def mode(self, df, scale):
"""
Mode of the Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float or None
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
var = scale**2
diag = scale.diagonal() # 1 x dim array
var += np.outer(diag, diag)
var *= df
return var
def var(self, df, scale):
"""
Variance of the Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out)
def _standard_rvs(self, n, shape, dim, df, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
random_state : np.random.RandomState instance
RandomState used for drawing the random variates.
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
# Random normal variates for off-diagonal elements
n_tril = dim * (dim-1) // 2
covariances = random_state.normal(
size=n*n_tril).reshape(shape+(n_tril,))
# Random chi-square variates for diagonal elements
variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5
for i in range(dim)]].reshape((dim,) + shape[::-1]).T
# Create the A matri(ces) - lower triangular
A = np.zeros(shape + (dim, dim))
# Input the covariances
size_idx = tuple([slice(None,None,None)]*len(shape))
tril_idx = np.tril_indices(dim, k=-1)
A[size_idx + tril_idx] = covariances
# Input the variances
diag_idx = np.diag_indices(dim)
A[size_idx + diag_idx] = variances
return A
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Calculate the matrices A, which are actually lower triangular
# Cholesky factorizations of a matrix B such that B ~ W(df, I)
A = self._standard_rvs(n, shape, dim, df, random_state)
# Calculate SA = C A A' C', where SA ~ W(df, scale)
# Note: this is the product of a (lower) (lower) (lower)' (lower)'
# or, denoting B = AA', it is C B C' where C is the lower
# triangular Cholesky factorization of the scale matrix.
# this appears to conflict with the instructions in [1]_, which
# suggest that it should be D' B D where D is the lower
# triangular factorization of the scale matrix. However, it is
# meant to refer to the Bartlett (1933) representation of a
# Wishart random variate as L A A' L' where L is lower triangular
# so it appears that understanding D' to be upper triangular
# is either a typo in or misreading of [1]_.
for index in np.ndindex(shape):
CA = np.dot(C, A[index])
A[index] = np.dot(CA, CA.T)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from a Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Cholesky decomposition of scale
C = scipy.linalg.cholesky(scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def _entropy(self, dim, df, log_det_scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'entropy' instead.
"""
return (
0.5 * (dim+1) * log_det_scale +
0.5 * dim * (dim+1) * _LOG_2 +
multigammaln(0.5*df, dim) -
0.5 * (df - dim - 1) * np.sum(
[psi(0.5*(df + 1 - (i+1))) for i in range(dim)]
) +
0.5 * df * dim
)
def entropy(self, df, scale):
"""
Compute the differential entropy of the Wishart.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Wishart distribution
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
_, log_det_scale = self._cholesky_logdet(scale)
return self._entropy(dim, df, log_det_scale)
def _cholesky_logdet(self, scale):
"""
Compute the Cholesky decomposition and determine log(det(scale)).
Parameters
----------
scale : ndarray
Scale matrix.
Returns
-------
c_decomp : ndarray
The Cholesky decomposition of `scale`.
logdet : scalar
The log of the determinant of `scale`.
Notes
-----
This computation of ``logdet`` is equivalent to
``np.linalg.slogdet(scale)``. It is ~2x faster though.
"""
c_decomp = scipy.linalg.cholesky(scale, lower=True)
logdet = 2 * np.sum(np.log(c_decomp.diagonal()))
return c_decomp, logdet
wishart = wishart_gen()
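# --- Illustrative sketch, not part of the original module ---
# The note in `_cholesky_logdet` says the result matches np.linalg.slogdet;
# this hypothetical helper checks that claim on a random symmetric positive
# definite matrix (numpy assumed available as `np`).
def _check_cholesky_logdet_matches_slogdet():
    rng = np.random.RandomState(1)
    A = rng.randn(5, 5)
    S = np.dot(A, A.T) + 5 * np.eye(5)             # well-conditioned SPD matrix
    _, logdet = wishart._cholesky_logdet(S)
    sign, ref = np.linalg.slogdet(S)
    return sign == 1.0 and np.allclose(logdet, ref)  # expected True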
class wishart_frozen(multi_rv_frozen):
"""
Create a frozen Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, df, scale, seed=None):
self._dist = wishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale)
self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale, self.C)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out)
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out)
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
return self._dist._entropy(self.dim, self.df, self.log_det_scale)
# Set frozen generator docstrings from corresponding docstrings in
# Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']:
method = wishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
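# --- Illustrative sketch, not part of the original module ---
# The wishart docstring notes that W_1(nu, 1) collapses to the chi-squared
# distribution with nu degrees of freedom.  This hypothetical helper verifies
# the claim numerically; chi2 is imported locally at call time.
def _check_wishart_chi2_collapse():
    from scipy.stats import chi2                   # assumed available at call time
    x = np.linspace(0.1, 8.0, 50)
    return np.allclose(wishart.pdf(x, df=3, scale=1),
                       chi2.pdf(x, 3))             # expected True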
from numpy import asarray_chkfinite, asarray
from scipy.linalg.misc import LinAlgError
from scipy.linalg.lapack import get_lapack_funcs
def _cho_inv_batch(a, check_finite=True):
"""
Invert the matrices a_i, using a Cholesky factorization of A, where
a_i resides in the last two dimensions of a and the other indices describe
the index i.
Overwrites the data in a.
Parameters
----------
a : array
Array of matrices to invert, where the matrices themselves are stored
in the last two dimensions.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : array
Array of inverses of the matrices ``a_i``.
See also
--------
scipy.linalg.cholesky : Cholesky factorization of a matrix
"""
if check_finite:
a1 = asarray_chkfinite(a)
else:
a1 = asarray(a)
if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]:
raise ValueError('expected square matrix in last two dimensions')
potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,))
tril_idx = np.tril_indices(a.shape[-2], k=-1)
triu_idx = np.triu_indices(a.shape[-2], k=1)
for index in np.ndindex(a1.shape[:-2]):
# Cholesky decomposition
a1[index], info = potrf(a1[index], lower=True, overwrite_a=False,
clean=False)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite"
% info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Inversion
a1[index], info = potri(a1[index], lower=True, overwrite_c=False)
if info > 0:
raise LinAlgError("the inverse could not be computed")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal'
' potrf' % -info)
# Make symmetric (dpotri only fills in the lower triangle)
a1[index][triu_idx] = a1[index][tril_idx]
return a1
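# --- Illustrative sketch, not part of the original module ---
# `_cho_inv_batch` inverts a stack of symmetric positive definite matrices in
# place via Cholesky factorization.  The hypothetical check below compares it
# against np.linalg.inv on a small random batch (numpy assumed available as `np`).
def _check_cho_inv_batch():
    rng = np.random.RandomState(0)
    A = rng.randn(4, 3, 3)
    spd = np.einsum('nij,nkj->nik', A, A) + 3 * np.eye(3)  # batch of SPD matrices
    expected = np.linalg.inv(spd)
    inverted = _cho_inv_batch(spd.copy())          # pass a copy: the input is overwritten
    return np.allclose(inverted, expected)         # expected True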
class invwishart_gen(wishart_gen):
r"""
An inverse Wishart random variable.
The `df` keyword specifies the degrees of freedom. The `scale` keyword
specifies the scale matrix, which must be symmetric and positive definite.
In this context, the scale matrix is often interpreted in terms of a
multivariate normal covariance matrix.
Methods
-------
``pdf(x, df, scale)``
Probability density function.
``logpdf(x, df, scale)``
Log of the probability density function.
``rvs(df, scale, size=1, random_state=None)``
Draw random samples from an inverse Wishart distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Alternatively, the object may be called (as a function) to fix the degrees
of freedom and scale parameters, returning a "frozen" inverse Wishart
random variable:
rv = invwishart(df=1, scale=1)
- Frozen object with the same methods but holding the given
degrees of freedom and scale fixed.
See Also
--------
wishart
Notes
-----
%(_doc_callparams_note)s
The scale matrix `scale` must be a symmetric positive definite
matrix. Singular matrices, including the symmetric positive semi-definite
case, are not supported.
The inverse Wishart distribution is often denoted
.. math::
W_p^{-1}(\nu, \Psi)
where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the
:math:`p \times p` scale matrix.
The probability density function for `invwishart` has support over positive
definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`,
then its PDF is given by:
.. math::
f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} }
|S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)}
\exp\left( -tr(\Sigma S^{-1}) / 2 \right)
If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then
:math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart).
If the scale matrix is 1-dimensional and equal to one, then the inverse
Wishart distribution :math:`W_1^{-1}(\nu, 1)` collapses to the
inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}`
and scale = :math:`\frac{1}{2}`.
.. versionadded:: 0.16.0
References
----------
.. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach",
Wiley, 1983.
.. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in
Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.stats import invwishart, invgamma
>>> x = np.linspace(0.01, 1, 100)
>>> iw = invwishart.pdf(x, df=6, scale=1)
>>> iw[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> ig = invgamma.pdf(x, 6/2., scale=1./2)
>>> ig[:3]
array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03])
>>> plt.plot(x, iw)
The input quantiles can be any shape of array, as long as the last
axis labels the components.
"""
def __init__(self, seed=None):
super(invwishart_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params)
def __call__(self, df=None, scale=None, seed=None):
"""
Create a frozen inverse Wishart distribution.
See `invwishart_frozen` for more information.
"""
return invwishart_frozen(df, scale, seed)
def _logpdf(self, x, dim, df, scale, log_det_scale):
"""
Parameters
----------
x : ndarray
Points at which to evaluate the log of the probability
density function.
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
scale : ndarray
Scale matrix
log_det_scale : float
Logarithm of the determinant of the scale matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
log_det_x = np.zeros(x.shape[-1])
#scale_x_inv = np.zeros(x.shape)
x_inv = np.copy(x).T
if dim > 1:
_cho_inv_batch(x_inv) # works in-place
else:
x_inv = 1./x_inv
tr_scale_x_inv = np.zeros(x.shape[-1])
for i in range(x.shape[-1]):
C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True)
log_det_x[i] = 2 * np.sum(np.log(C.diagonal()))
#scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T
tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace()
# Log PDF
out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) -
(0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) -
multigammaln(0.5*df, dim))
return out
def logpdf(self, x, df, scale):
"""
Log of the inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Log of the probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
dim, df, scale = self._process_parameters(df, scale)
x = self._process_quantiles(x, dim)
_, log_det_scale = self._cholesky_logdet(scale)
out = self._logpdf(x, dim, df, scale, log_det_scale)
return _squeeze_output(out)
def pdf(self, x, df, scale):
"""
Inverse Wishart probability density function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile must be a symmetric positive definite matrix.
%(_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpdf(x, df, scale))
def _mean(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mean' instead.
"""
if df > dim + 1:
out = scale / (df - dim - 1)
else:
out = None
return out
def mean(self, df, scale):
"""
Mean of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus one.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float or None
The mean of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mean(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _mode(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'mode' instead.
"""
return scale / (df + dim + 1)
def mode(self, df, scale):
"""
Mode of the inverse Wishart distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mode : float
The Mode of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._mode(dim, df, scale)
return _squeeze_output(out)
def _var(self, dim, df, scale):
"""
Parameters
----------
dim : int
Dimension of the scale matrix
%(_doc_default_callparams)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'var' instead.
"""
if df > dim + 3:
var = (df - dim + 1) * scale**2
diag = scale.diagonal() # 1 x dim array
var += (df - dim - 1) * np.outer(diag, diag)
var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3)
else:
var = None
return var
def var(self, df, scale):
"""
Variance of the inverse Wishart distribution
Only valid if the degrees of freedom are greater than the dimension of
the scale matrix plus three.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
var : float
The variance of the distribution
"""
dim, df, scale = self._process_parameters(df, scale)
out = self._var(dim, df, scale)
return _squeeze_output(out) if out is not None else out
def _rvs(self, n, shape, dim, df, C, random_state):
"""
Parameters
----------
n : integer
Number of variates to generate
shape : iterable
Shape of the variates to generate
dim : int
Dimension of the scale matrix
df : int
Degrees of freedom
C : ndarray
Cholesky factorization of the scale matrix, lower triangular.
%(_doc_random_state)s
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'rvs' instead.
"""
random_state = self._get_random_state(random_state)
# Get random draws A such that A ~ W(df, I)
A = super(invwishart_gen, self)._standard_rvs(n, shape, dim,
df, random_state)
# Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale)
eye = np.eye(dim)
trtrs = get_lapack_funcs(('trtrs'), (A,))
for index in np.ndindex(A.shape[:-2]):
# Calculate CA
CA = np.dot(C, A[index])
# Get (C A)^{-1} via triangular solver
if dim > 1:
CA, info = trtrs(CA, eye, lower=True)
if info > 0:
raise LinAlgError("Singular matrix.")
if info < 0:
raise ValueError('Illegal value in %d-th argument of'
' internal trtrs' % -info)
else:
CA = 1. / CA
# Get SA
A[index] = np.dot(CA.T, CA)
return A
def rvs(self, df, scale, size=1, random_state=None):
"""
Draw random samples from an inverse Wishart distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray
Random variates of shape (`size`) + (`dim`, `dim`), where `dim` is
the dimension of the scale matrix.
Notes
-----
%(_doc_callparams_note)s
"""
n, shape = self._process_size(size)
dim, df, scale = self._process_parameters(df, scale)
# Invert the scale
eye = np.eye(dim)
L, lower = scipy.linalg.cho_factor(scale, lower=True)
inv_scale = scipy.linalg.cho_solve((L, lower), eye)
# Cholesky decomposition of inverted scale
C = scipy.linalg.cholesky(inv_scale, lower=True)
out = self._rvs(n, shape, dim, df, C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
invwishart = invwishart_gen()
class invwishart_frozen(multi_rv_frozen):
def __init__(self, df, scale, seed=None):
"""
Create a frozen inverse Wishart distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution
scale : array_like
Scale matrix of the distribution
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
self._dist = invwishart_gen(seed)
self.dim, self.df, self.scale = self._dist._process_parameters(
df, scale
)
# Get the determinant via Cholesky factorization
C, lower = scipy.linalg.cho_factor(self.scale, lower=True)
self.log_det_scale = 2 * np.sum(np.log(C.diagonal()))
# Get the inverse using the Cholesky factorization
eye = np.eye(self.dim)
self.inv_scale = scipy.linalg.cho_solve((C, lower), eye)
# Get the Cholesky factorization of the inverse scale
self.C = scipy.linalg.cholesky(self.inv_scale, lower=True)
def logpdf(self, x):
x = self._dist._process_quantiles(x, self.dim)
out = self._dist._logpdf(x, self.dim, self.df, self.scale,
self.log_det_scale)
return _squeeze_output(out)
def pdf(self, x):
return np.exp(self.logpdf(x))
def mean(self):
out = self._dist._mean(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def mode(self):
out = self._dist._mode(self.dim, self.df, self.scale)
return _squeeze_output(out)
def var(self):
out = self._dist._var(self.dim, self.df, self.scale)
return _squeeze_output(out) if out is not None else out
def rvs(self, size=1, random_state=None):
n, shape = self._dist._process_size(size)
out = self._dist._rvs(n, shape, self.dim, self.df,
self.C, random_state)
return _squeeze_output(out)
def entropy(self):
# Need to find reference for inverse Wishart entropy
raise AttributeError
# Set frozen generator docstrings from corresponding docstrings in
# inverse Wishart and fill in default strings in class docstrings
for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']:
method = invwishart_gen.__dict__[name]
method_frozen = wishart_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, wishart_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
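# --- Illustrative sketch, not part of the original module ---
# The invwishart docstring notes that for a 1-dimensional unit scale the
# distribution collapses to an inverse gamma with shape nu/2 and scale 1/2.
# This hypothetical helper verifies that numerically; invgamma is imported
# locally at call time (numpy assumed available as `np`).
def _check_invwishart_invgamma_collapse():
    from scipy.stats import invgamma               # assumed available at call time
    x = np.linspace(0.05, 2.0, 50)
    return np.allclose(invwishart.pdf(x, df=6, scale=1),
                       invgamma.pdf(x, 6/2., scale=1./2))  # expected True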
_multinomial_doc_default_callparams = """\
n : int
Number of trials
p : array_like
Probability of a trial falling into each category; should sum to 1
"""
_multinomial_doc_callparams_note = \
"""`n` should be a positive integer. Each element of `p` should be in the
interval :math:`[0,1]` and the elements should sum to 1. If they do not sum to
1, the last element of the `p` array is not used and is replaced with the
remaining probability left over from the earlier elements.
"""
_multinomial_doc_frozen_callparams = ""
_multinomial_doc_frozen_callparams_note = \
"""See class definition for a detailed description of parameters."""
multinomial_docdict_params = {
'_doc_default_callparams': _multinomial_doc_default_callparams,
'_doc_callparams_note': _multinomial_doc_callparams_note,
'_doc_random_state': _doc_random_state
}
multinomial_docdict_noparams = {
'_doc_default_callparams': _multinomial_doc_frozen_callparams,
'_doc_callparams_note': _multinomial_doc_frozen_callparams_note,
'_doc_random_state': _doc_random_state
}
class multinomial_gen(multi_rv_generic):
r"""
A multinomial random variable.
Methods
-------
``pmf(x, n, p)``
Probability mass function.
``logpmf(x, n, p)``
Log of the probability mass function.
``rvs(n, p, size=1, random_state=None)``
Draw random samples from a multinomial distribution.
``entropy(n, p)``
Compute the entropy of the multinomial distribution.
``cov(n, p)``
Compute the covariance matrix of the multinomial distribution.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
%(_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_doc_callparams_note)s
Alternatively, the object may be called (as a function) to fix the `n` and
`p` parameters, returning a "frozen" multinomial random variable:
The probability mass function for `multinomial` is
.. math::
f(x) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots p_k^{x_k},
supported on :math:`x=(x_1, \ldots, x_k)` where each :math:`x_i` is a
nonnegative integer and their sum is :math:`n`.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy.stats import multinomial
>>> rv = multinomial(8, [0.3, 0.2, 0.5])
>>> rv.pmf([1, 3, 4])
0.042000000000000072
The multinomial distribution for :math:`k=2` is identical to the
corresponding binomial distribution (tiny numerical differences
notwithstanding):
>>> from scipy.stats import binom
>>> multinomial.pmf([3, 4], n=7, p=[0.4, 0.6])
0.29030399999999973
>>> binom.pmf(3, 7, 0.4)
0.29030400000000012
The functions ``pmf``, ``logpmf``, ``entropy``, and ``cov`` support
broadcasting, under the convention that the vector parameters (``x`` and
``p``) are interpreted as if each row along the last axis is a single
object. For instance:
>>> multinomial.pmf([[3, 4], [3, 5]], n=[7, 8], p=[.3, .7])
array([0.2268945, 0.25412184])
Here, ``x.shape == (2, 2)``, ``n.shape == (2,)``, and ``p.shape == (2,)``,
but following the rules mentioned above they behave as if the rows
``[3, 4]`` and ``[3, 5]`` in ``x`` and ``[.3, .7]`` in ``p`` were a single
object, and as if we had ``x.shape = (2,)``, ``n.shape = (2,)``, and
``p.shape = ()``. To obtain the individual elements without broadcasting,
we would do this:
>>> multinomial.pmf([3, 4], n=7, p=[.3, .7])
0.2268945
>>> multinomial.pmf([3, 5], 8, p=[.3, .7])
0.25412184
This broadcasting also works for ``cov``, where the output objects are
square matrices of size ``p.shape[-1]``. For example:
>>> multinomial.cov([4, 5], [[.3, .7], [.4, .6]])
array([[[ 0.84, -0.84],
[-0.84, 0.84]],
[[ 1.2 , -1.2 ],
[-1.2 , 1.2 ]]])
In this example, ``n.shape == (2,)`` and ``p.shape == (2, 2)``, and
following the rules above, these broadcast as if ``p.shape == (2,)``.
Thus the result should also be of shape ``(2,)``, but since each output is
a :math:`2 \times 2` matrix, the result in fact has shape ``(2, 2, 2)``,
where ``result[0]`` is equal to ``multinomial.cov(n=4, p=[.3, .7])`` and
``result[1]`` is equal to ``multinomial.cov(n=5, p=[.4, .6])``.
See also
--------
scipy.stats.binom : The binomial distribution.
numpy.random.multinomial : Sampling from the multinomial distribution.
"""
def __init__(self, seed=None):
super(multinomial_gen, self).__init__(seed)
self.__doc__ = \
doccer.docformat(self.__doc__, multinomial_docdict_params)
def __call__(self, n, p, seed=None):
"""
Create a frozen multinomial distribution.
See `multinomial_frozen` for more information.
"""
return multinomial_frozen(n, p, seed)
def _process_parameters(self, n, p):
"""
Return: n_, p_, npcond.
n_ and p_ are arrays of the correct shape; npcond is a boolean array
flagging values out of the domain.
"""
p = np.array(p, dtype=np.float64, copy=True)
p[...,-1] = 1. - p[...,:-1].sum(axis=-1)
# true for bad p
pcond = np.any(p <= 0, axis=-1)
pcond |= np.any(p > 1, axis=-1)
n = np.array(n, dtype=np.int, copy=True)
# true for bad n
ncond = n <= 0
return n, p, ncond | pcond
def _process_quantiles(self, x, n, p):
"""
Return: x_, xcond.
x_ is an int array; xcond is a boolean array flagging values out of the
domain.
"""
xx = np.asarray(x, dtype=np.int)
if xx.ndim == 0:
raise ValueError("x must be an array.")
if xx.size != 0 and not xx.shape[-1] == p.shape[-1]:
raise ValueError("Size of each quantile should be size of p: "
"received %d, but expected %d." % (xx.shape[-1], p.shape[-1]))
# true for x out of the domain
cond = np.any(xx != x, axis=-1)
cond |= np.any(xx < 0, axis=-1)
cond = cond | (np.sum(xx, axis=-1) != n)
return xx, cond
def _checkresult(self, result, cond, bad_value):
result = np.asarray(result)
if cond.ndim != 0:
result[cond] = bad_value
elif cond:
if result.ndim == 0:
return bad_value
result[...] = bad_value
return result
def _logpmf(self, x, n, p):
return gammaln(n+1) + np.sum(xlogy(x, p) - gammaln(x+1), axis=-1)
def logpmf(self, x, n, p):
"""
Log of the Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile is a vector of nonnegative integer counts summing to `n`.
%(_doc_default_callparams)s
Returns
-------
logpmf : ndarray or scalar
Log of the probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x, xcond = self._process_quantiles(x, n, p)
result = self._logpmf(x, n, p)
# replace values for which x was out of the domain; broadcast
# xcond to the right shape
xcond_ = xcond | np.zeros(npcond.shape, dtype=np.bool_)
result = self._checkresult(result, xcond_, np.NINF)
# replace values bad for n or p; broadcast npcond to the right shape
npcond_ = npcond | np.zeros(xcond.shape, dtype=np.bool_)
return self._checkresult(result, npcond_, np.NAN)
def pmf(self, x, n, p):
"""
Multinomial probability mass function.
Parameters
----------
x : array_like
Quantiles, with the last axis of `x` denoting the components.
Each quantile is a vector of nonnegative integer counts summing to `n`.
%(_doc_default_callparams)s
Returns
-------
pmf : ndarray or scalar
Probability mass function evaluated at `x`
Notes
-----
%(_doc_callparams_note)s
"""
return np.exp(self.logpmf(x, n, p))
def mean(self, n, p):
"""
Mean of the Multinomial distribution
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
mean : float
The mean of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
result = n[..., np.newaxis]*p
return self._checkresult(result, npcond, np.NAN)
def cov(self, n, p):
"""
Covariance matrix of the multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
cov : ndarray
The covariance matrix of the distribution
"""
n, p, npcond = self._process_parameters(n, p)
nn = n[..., np.newaxis, np.newaxis]
result = nn * np.einsum('...j,...k->...jk', -p, p)
# change the diagonal
for i in range(p.shape[-1]):
result[...,i, i] += n*p[..., i]
return self._checkresult(result, npcond, np.nan)
def entropy(self, n, p):
r"""
Compute the entropy of the multinomial distribution.
The entropy is computed using this expression:
.. math::
f(x) = - \log n! - n\sum_{i=1}^k p_i \log p_i +
\sum_{i=1}^k \sum_{x=0}^n \binom n x p_i^x(1-p_i)^{n-x} \log x!
Parameters
----------
%(_doc_default_callparams)s
Returns
-------
h : scalar
Entropy of the Multinomial distribution
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
x = np.r_[1:np.max(n)+1]
term1 = n*np.sum(entr(p), axis=-1)
term1 -= gammaln(n+1)
n = n[..., np.newaxis]
new_axes_needed = max(p.ndim, n.ndim) - x.ndim + 1
x.shape += (1,)*new_axes_needed
term2 = np.sum(binom.pmf(x, n, p)*gammaln(x+1),
axis=(-1, -1-new_axes_needed))
return self._checkresult(term1 + term2, npcond, np.nan)
def rvs(self, n, p, size=None, random_state=None):
"""
Draw random samples from a Multinomial distribution.
Parameters
----------
%(_doc_default_callparams)s
size : integer or iterable of integers, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of shape (`size`, `len(p)`)
Notes
-----
%(_doc_callparams_note)s
"""
n, p, npcond = self._process_parameters(n, p)
random_state = self._get_random_state(random_state)
return random_state.multinomial(n, p, size)
multinomial = multinomial_gen()
class multinomial_frozen(multi_rv_frozen):
r"""
Create a frozen Multinomial distribution.
Parameters
----------
n : int
number of trials
p: array_like
probability of a trial falling into each category; should sum to 1
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
"""
def __init__(self, n, p, seed=None):
self._dist = multinomial_gen(seed)
self.n, self.p, self.npcond = self._dist._process_parameters(n, p)
# monkey patch self._dist
def _process_parameters(n, p):
return self.n, self.p, self.npcond
self._dist._process_parameters = _process_parameters
def logpmf(self, x):
return self._dist.logpmf(x, self.n, self.p)
def pmf(self, x):
return self._dist.pmf(x, self.n, self.p)
def mean(self):
return self._dist.mean(self.n, self.p)
def cov(self):
return self._dist.cov(self.n, self.p)
def entropy(self):
return self._dist.entropy(self.n, self.p)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.n, self.p, size, random_state)
# Set frozen generator docstrings from corresponding docstrings in
# multinomial and fill in default strings in class docstrings
for name in ['logpmf', 'pmf', 'mean', 'cov', 'rvs']:
method = multinomial_gen.__dict__[name]
method_frozen = multinomial_frozen.__dict__[name]
method_frozen.__doc__ = doccer.docformat(
method.__doc__, multinomial_docdict_noparams)
method.__doc__ = doccer.docformat(method.__doc__,
multinomial_docdict_params)
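# --- Illustrative sketch, not part of the original module ---
# The multinomial docstring notes that the two-category case reduces to the
# binomial distribution.  This hypothetical helper checks that claim; binom is
# imported locally at call time (numpy assumed available as `np`).
def _check_multinomial_binomial_agreement():
    from scipy.stats import binom                  # assumed available at call time
    return np.allclose(multinomial.pmf([3, 4], n=7, p=[0.4, 0.6]),
                       binom.pmf(3, 7, 0.4))       # expected True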
class special_ortho_group_gen(multi_rv_generic):
r"""
A matrix-valued SO(N) random variable.
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from SO(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
-----
This class is wrapping the random_rot code from the MDP Toolkit,
https://github.com/mdp-toolkit/mdp-toolkit
Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The algorithm is described in the paper
Stewart, G.W., "The efficient generation of random orthogonal
matrices with an application to condition estimators", SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization
See also the similar `ortho_group`.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> x = special_ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> scipy.linalg.det(x)
1.0
This generates one random matrix from SO(3). It is orthogonal and
has a determinant of 1.
"""
def __init__(self, seed=None):
super(special_ortho_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def __call__(self, dim=None, seed=None):
"""
Create a frozen SO(N) distribution.
See `special_ortho_group_frozen` for more information.
"""
return special_ortho_group_frozen(dim, seed=seed)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("""Dimension of rotation must be specified,
and must be a scalar greater than 1.""")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from SO(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
H = np.eye(dim)
D = np.ones((dim,))
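# Build the rotation as a product of Householder reflections acting on
# successively smaller trailing blocks (Stewart, 1980); D accumulates the
# signs needed so that the final product lies in SO(N) rather than O(N).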
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
D[n-1] = np.sign(x[0])
x[0] -= D[n-1]*np.sqrt((x*x).sum())
# Householder transformation
Hx = (np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = (-1)**(1-(dim % 2))*D.prod()
# Equivalent to np.dot(np.diag(D), H) but faster, apparently
H = (D*H.T).T
return H
special_ortho_group = special_ortho_group_gen()
class special_ortho_group_frozen(multi_rv_frozen):
def __init__(self, dim=None, seed=None):
"""
Create a frozen SO(N) distribution.
Parameters
----------
dim : scalar
Dimension of matrices
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Examples
--------
>>> from scipy.stats import special_ortho_group
>>> g = special_ortho_group(5)
>>> x = g.rvs()
"""
self._dist = special_ortho_group_gen(seed)
self.dim = self._dist._process_parameters(dim)
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.dim, size, random_state)
class ortho_group_gen(multi_rv_generic):
r"""
A matrix-valued O(N) random variable.
Return a random orthogonal matrix, drawn from the O(N) Haar
distribution (the only uniform distribution on O(N)).
The `dim` keyword specifies the dimension N.
Methods
-------
``rvs(dim=None, size=1, random_state=None)``
Draw random samples from O(N).
Parameters
----------
dim : scalar
Dimension of matrices
Notes
----------
This class is closely related to `special_ortho_group`.
Some care is taken to avoid numerical error, as per the paper by Mezzadri.
References
----------
.. [1] F. Mezzadri, "How to generate random matrices from the classical
compact groups", :arXiv:`math-ph/0609050v2`.
Examples
--------
>>> from scipy.stats import ortho_group
>>> x = ortho_group.rvs(3)
>>> np.dot(x, x.T)
array([[ 1.00000000e+00, 1.13231364e-17, -2.86852790e-16],
[ 1.13231364e-17, 1.00000000e+00, -1.46845020e-16],
[ -2.86852790e-16, -1.46845020e-16, 1.00000000e+00]])
>>> import scipy.linalg
>>> np.fabs(scipy.linalg.det(x))
1.0
This generates one random matrix from O(3). It is orthogonal and
has a determinant of +1 or -1.
"""
def __init__(self, seed=None):
super(ortho_group_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, dim):
"""
Dimension N must be specified; it cannot be inferred.
"""
if dim is None or not np.isscalar(dim) or dim <= 1 or dim != int(dim):
raise ValueError("Dimension of rotation must be specified,"
"and must be a scalar greater than 1.")
return dim
def rvs(self, dim, size=1, random_state=None):
"""
Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
size = int(size)
if size > 1:
return np.array([self.rvs(dim, size=1, random_state=random_state)
for i in range(size)])
dim = self._process_parameters(dim)
random_state = self._get_random_state(random_state)
H = np.eye(dim)
for n in range(1, dim):
x = random_state.normal(size=(dim-n+1,))
# random sign, 50/50, but chosen carefully to avoid roundoff error
D = np.sign(x[0])
x[0] += D*np.sqrt((x*x).sum())
# Householder transformation
Hx = -D*(np.eye(dim-n+1)
- 2.*np.outer(x, x)/(x*x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
return H
ortho_group = ortho_group_gen()
class random_correlation_gen(multi_rv_generic):
r"""
A random correlation matrix.
Return a random correlation matrix, given a vector of eigenvalues.
The `eigs` keyword specifies the eigenvalues of the correlation matrix,
and implies the dimension.
Methods
-------
``rvs(eigs=None, random_state=None)``
Draw random correlation matrices, all with eigenvalues eigs.
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix.
Notes
----------
Generates a random correlation matrix following a numerically stable
algorithm spelled out by Davies & Higham. This algorithm uses a single O(N)
similarity transformation to construct a symmetric positive semi-definite
matrix, and applies a series of Givens rotations to scale it to have ones
on the diagonal.
References
----------
.. [1] Davies, Philip I; Higham, Nicholas J; "Numerically stable generation
of correlation matrices and their factors", BIT 2000, Vol. 40,
No. 4, pp. 640-651
Examples
--------
>>> from scipy.stats import random_correlation
>>> np.random.seed(514)
>>> x = random_correlation.rvs((.5, .8, 1.2, 1.5))
>>> x
array([[ 1. , -0.20387311, 0.18366501, -0.04953711],
[-0.20387311, 1. , -0.24351129, 0.06703474],
[ 0.18366501, -0.24351129, 1. , 0.38530195],
[-0.04953711, 0.06703474, 0.38530195, 1. ]])
>>> import scipy.linalg
>>> e, v = scipy.linalg.eigh(x)
>>> e
array([ 0.5, 0.8, 1.2, 1.5])
"""
def __init__(self, seed=None):
super(random_correlation_gen, self).__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__)
def _process_parameters(self, eigs, tol):
eigs = np.asarray(eigs, dtype=float)
dim = eigs.size
if eigs.ndim != 1 or eigs.shape[0] != dim or dim <= 1:
raise ValueError("Array 'eigs' must be a vector of length greater than 1.")
if np.fabs(np.sum(eigs) - dim) > tol:
raise ValueError("Sum of eigenvalues must equal dimensionality.")
for x in eigs:
if x < -tol:
raise ValueError("All eigenvalues must be non-negative.")
return dim, eigs
def _givens_to_1(self, aii, ajj, aij):
"""Computes a 2x2 Givens matrix to put 1's on the diagonal for the input matrix.
The input matrix is a 2x2 symmetric matrix M = [ aii aij ; aij ajj ].
The output matrix g is a 2x2 rotation matrix of the form [ c s ; -s c ];
the elements c and s are returned.
Applying the output matrix to the input matrix (as b=g.T M g)
results in a matrix with bii=1, provided tr(M) - det(M) >= 1
and floating point issues do not occur. Otherwise, some other
valid rotation is returned. When tr(M)==2, also bjj=1.
"""
aiid = aii - 1.
ajjd = ajj - 1.
if ajjd == 0:
# ajj==1, so swap aii and ajj to avoid division by zero
return 0., 1.
dd = math.sqrt(max(aij**2 - aiid*ajjd, 0))
# The sign of t is chosen to avoid cancellation [1]
t = (aij + math.copysign(dd, aij)) / ajjd
c = 1. / math.sqrt(1. + t*t)
if c == 0:
# Underflow
s = 1.0
else:
s = c*t
return c, s
def _to_corr(self, m):
"""
Given a PSD matrix m, rotate to put ones on the diagonal, turning it
into a correlation matrix. This also requires that the trace equal the
dimensionality. Note: modifies the input matrix in place.
"""
# Check requirements for in-place Givens
if not (m.flags.c_contiguous and m.dtype == np.float64 and m.shape[0] == m.shape[1]):
raise ValueError()
d = m.shape[0]
for i in range(d-1):
if m[i,i] == 1:
continue
elif m[i, i] > 1:
for j in range(i+1, d):
if m[j, j] < 1:
break
else:
for j in range(i+1, d):
if m[j, j] > 1:
break
c, s = self._givens_to_1(m[i,i], m[j,j], m[i,j])
# Use BLAS to apply Givens rotations in-place. Equivalent to:
# g = np.eye(d)
# g[i, i] = g[j,j] = c
# g[j, i] = -s; g[i, j] = s
# m = np.dot(g.T, np.dot(m, g))
mv = m.ravel()
drot(mv, mv, c, -s, n=d,
offx=i*d, incx=1, offy=j*d, incy=1,
overwrite_x=True, overwrite_y=True)
drot(mv, mv, c, -s, n=d,
offx=i, incx=d, offy=j, incy=d,
overwrite_x=True, overwrite_y=True)
return m
def rvs(self, eigs, random_state=None, tol=1e-13, diag_tol=1e-7):
"""
Draw random correlation matrices
Parameters
----------
eigs : 1d ndarray
Eigenvalues of correlation matrix
tol : float, optional
Tolerance for input parameter checks
diag_tol : float, optional
Tolerance for deviation of the diagonal of the resulting
matrix. Default: 1e-7
Raises
------
RuntimeError
Floating point error prevented generating a valid correlation
matrix.
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim),
each having eigenvalues eigs.
"""
dim, eigs = self._process_parameters(eigs, tol=tol)
random_state = self._get_random_state(random_state)
m = ortho_group.rvs(dim, random_state=random_state)
m = np.dot(np.dot(m, np.diag(eigs)), m.T) # Set the trace of m
m = self._to_corr(m) # Carefully rotate to unit diagonal
# Check diagonal
if abs(m.diagonal() - 1).max() > diag_tol:
raise RuntimeError("Failed to generate a valid correlation matrix")
return m
random_correlation = random_correlation_gen()
| bsd-3-clause |
lresende/incubator-zeppelin | interpreter/lib/python/backend_zinline.py | 61 | 11831 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file provides a static (non-interactive) matplotlib plotting backend
# for zeppelin notebooks for use with the python/pyspark interpreters
from __future__ import print_function
import sys
import uuid
import warnings
import base64
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import mpl_config
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg
from matplotlib.backend_bases import ShowBase, FigureManagerBase
from matplotlib.figure import Figure
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
class Show(ShowBase):
"""
A callable object that displays the figures to the screen. Valid kwargs
include figure width and height (in units supported by the div tag), block
(allows users to override blocking behavior regardless of whether or not
interactive mode is enabled, currently unused) and close (Implicitly call
matplotlib.pyplot.close('all') with each call to show()).
"""
def __call__(self, close=None, block=None, **kwargs):
if close is None:
close = mpl_config.get('close')
try:
managers = Gcf.get_all_fig_managers()
if not managers:
return
# Tell zeppelin that the output will be html using the %html magic
# We want to do this only once to avoid seeing "%html" printed
# directly to the output when multiple figures are displayed from
# one paragraph.
if mpl_config.get('angular'):
print('%angular')
else:
print('%html')
# Show all open figures
for manager in managers:
manager.show(**kwargs)
finally:
# This closes all the figures if close is set to True.
if close and Gcf.get_all_fig_managers():
Gcf.destroy_all()
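# Illustrative sketch (not part of the original backend): what a notebook
# paragraph effectively does when it plots and then calls show(). The figure
# is arbitrary; width/height are forwarded to the <div>/<img> markup built in
# zdisplay() further below.
def _example_show_usage():
    import matplotlib.pyplot as plt
    plt.plot([0, 1, 2], [0, 1, 4])
    show(width='600px', height='400px', close=True)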
class FigureCanvasZInline(FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
"""
def get_bytes(self, **kwargs):
"""
Get the byte representation of the figure.
Should only be used with jpg/png formats.
"""
# Make sure format is correct
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt == 'svg':
raise ValueError("get_bytes() does not support svg, use png or jpg")
# Express the image as bytes
buf = BytesIO()
self.print_figure(buf, **kwargs)
fmt = fmt.encode()
if sys.version_info >= (3, 4) and sys.version_info < (3, 5):
byte_str = bytes("data:image/%s;base64," %fmt, "utf-8")
else:
byte_str = b"data:image/%s;base64," %fmt
byte_str += base64.b64encode(buf.getvalue())
# Python3 forces all strings to default to unicode, but for raster image
# formats (e.g. png, jpg), we want to work with bytes. Thus this step is
# needed to ensure compatibility for all Python versions.
byte_str = byte_str.decode('ascii')
buf.close()
return byte_str
def get_svg(self, **kwargs):
"""
Get the svg representation of the figure.
Should only be used with svg format.
"""
# Make sure format is correct
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt != 'svg':
raise ValueError("get_svg() does not support png or jpg, use svg")
# For SVG the data string has to be unicode, not bytes
buf = StringIO()
self.print_figure(buf, **kwargs)
svg_str = buf.getvalue()
buf.close()
return svg_str
def draw_idle(self, *args, **kwargs):
"""
Called when the figure gets updated (eg through a plotting command).
This is overridden to allow open figures to be reshown after they
are updated when mpl_config.get('close') is False.
"""
if not self._is_idle_drawing:
with self._idle_draw_cntx():
self.draw(*args, **kwargs)
draw_if_interactive()
class FigureManagerZInline(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.fig_id = "figure_{0}".format(uuid.uuid4().hex)
self._shown = False
def angular_bind(self, **kwargs):
"""
Bind figure data to Zeppelin's Angular Object Registry.
If mpl_config("angular") is True and PY4J is supported, this allows
for the possibility to interactively update a figure from a separate
paragraph without having to display it multiple times.
"""
# This doesn't work for SVG so make sure it's not our format
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt == 'svg':
return
# Get the figure data as a byte array
src = self.canvas.get_bytes(**kwargs)
# Flag to determine whether or not to use
# zeppelin's angular display system
angular = mpl_config.get('angular')
# ZeppelinContext instance (requires PY4J)
context = mpl_config.get('context')
# Finally we must ensure that automatic closing is set to False,
# as otherwise using the angular display system is pointless
close = mpl_config.get('close')
# If above conditions are met, bind the figure data to
# the Angular Object Registry.
if not close and angular:
if hasattr(context, 'angularBind'):
# Binding is performed through figure ID to ensure this works
# if multiple figures are open
context.angularBind(self.fig_id, src)
# Zeppelin will automatically replace this value even if it
# is updated from another paragraph thanks to the {{}} notation
src = "{{%s}}" %self.fig_id
else:
warnings.warn("Cannot bind figure to Angular Object Registry. "
"Check if PY4J is installed.")
return src
def angular_unbind(self):
"""
Unbind figure from angular display system.
"""
context = mpl_config.get('context')
if hasattr(context, 'angularUnbind'):
context.angularUnbind(self.fig_id)
def destroy(self):
"""
Called when close=True or implicitly by pyplot.close().
Overridden to automatically clean up the angular object registry.
"""
self.angular_unbind()
def show(self, **kwargs):
if not self._shown:
zdisplay(self.canvas.figure, **kwargs)
else:
self.canvas.draw_idle()
self.angular_bind(**kwargs)
self._shown = True
def draw_if_interactive():
"""
If interactive mode is on, this allows for updating properties of
the figure when each new plotting command is called.
"""
manager = Gcf.get_active()
interactive = matplotlib.is_interactive()
angular = mpl_config.get('angular')
# Don't bother continuing if we aren't in interactive mode
# or if there are no active figures. Also pointless to continue
# in angular mode as we don't want to reshow the figure.
if not interactive or angular or manager is None:
return
# Allow for figure to be reshown if close is false since
# this function call implies that it has been updated
if not mpl_config.get('close'):
manager._shown = False
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this (and
# new_figure_manager_given_figure) is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
# main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasZInline(figure)
manager = FigureManagerZInline(canvas, num)
return manager
########################################################################
#
# Backend specific functions
#
########################################################################
def zdisplay(fig, **kwargs):
"""
Publishes a matplotlib figure to the notebook paragraph output.
"""
# kwargs can be width or height (in units supported by div tag)
width = kwargs.pop('width', 'auto')
height = kwargs.pop('height', 'auto')
fmt = kwargs.get('format', mpl_config.get('format'))
# Check if format is supported
supported_formats = mpl_config.get('supported_formats')
if fmt not in supported_formats:
raise ValueError("Unsupported format %s" %fmt)
# For SVG the data string has to be unicode, not bytes
if fmt == 'svg':
img = fig.canvas.get_svg(**kwargs)
# This is needed to ensure the SVG image is the correct size.
# We should find a better way to do this...
width = '{}px'.format(mpl_config.get('width'))
height = '{}px'.format(mpl_config.get('height'))
else:
# Express the image as bytes
src = fig.canvas.manager.angular_bind(**kwargs)
img = "<img src={src} style='width={width};height:{height}'>"
img = img.format(src=src, width=width, height=height)
# Print the image to the notebook paragraph via the %html magic
html = "<div style='width:{width};height:{height}'>{img}<div>"
print(html.format(width=width, height=height, img=img))
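# Illustrative sketch (not part of the original backend): zdisplay() can also
# be called directly on a figure, e.g. to force the SVG pathway; 'svg' must be
# listed in mpl_config's 'supported_formats' for this to succeed.
def _example_zdisplay_svg():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [1, 0])
    zdisplay(fig, format='svg')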
def displayhook():
"""
Called post paragraph execution if interactive mode is on
"""
if matplotlib.is_interactive():
show()
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
# Create a reference to the show function we are using. This is what actually
# gets called by matplotlib.pyplot.show().
show = Show()
# Default FigureCanvas and FigureManager classes to use from the backend
FigureCanvas = FigureCanvasZInline
FigureManager = FigureManagerZInline
| apache-2.0 |
Obus/scikit-learn | sklearn/cluster/tests/test_k_means.py | 132 | 25860 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non-centered, sparse centers to check the algorithms on non-trivial data
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
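# (np.unique with return_index=True gives, for each cluster id, the position
# of its first occurrence; indexing by this_labels relabels each point with
# that position, which is 0, 1, 2 here because the first three samples fall
# in three different clusters.)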
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
# Check the KMeans will work well, even if X is a fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
# Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
# should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
# that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
# Starting with bad centers that are quickly ignored should not
# result in a repositioning of the centers to the center of mass that
# would lead to collapsed centers, which in turn makes the clustering
# dependent on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
# centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# check that models trained on sparse input also works for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
# Check that increasing the number of init increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
# too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
| bsd-3-clause |
nelson-liu/scikit-learn | examples/linear_model/plot_logistic_multinomial.py | 50 | 2480 | """
====================================================
Plot multinomial and One-vs-Rest Logistic Regression
====================================================
Plot decision surface of multinomial and One-vs-Rest Logistic Regression.
The hyperplanes corresponding to the three One-vs-Rest (OVR) classifiers
are represented by the dashed lines.
"""
print(__doc__)
# Authors: Tom Dupre la Tour <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
# print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
# create a mesh to plot in
h = .02 # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
# Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
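# Each one-vs-rest boundary satisfies coef[c, 0]*x + coef[c, 1]*y +
# intercept[c] = 0, so plot_hyperplane solves for y and draws the line
# across the current x-limits.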
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
| bsd-3-clause |
toastedcornflakes/scikit-learn | sklearn/gaussian_process/tests/test_gpr.py | 11 | 11915 | """Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
"""Test the interpolating property for different kernels."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_true(np.allclose(y_pred, y))
assert_true(np.allclose(np.diag(y_cov), 0.))
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood())
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
def test_solution_inside_bounds():
""" Test that hyperparameter-optimization remains in bounds"""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_prior():
""" Test that GP prior has mean 0 and identical variances."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
def test_sample_statistics():
""" Test that statistics of samples drawn from GP are correct."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 300000)
# More digits accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 1)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
""" Test that kernel parameters are unmodified when optimizer is None."""
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
""" Test that predicted std.-dev. is consistent with cov's diagonal."""
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
""" Test that GPR can identify meaningful anisotropic length-scales. """
# We learn a function which varies in one dimension ten-times slower
# than in the other. The corresponding length-scales should differ by at
# least a factor 5
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(np.exp(gpr.kernel_.theta[1]),
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 2
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_y_normalization():
""" Test normalization of the target values in GP
Fitting non-normalizing GP on normalized y and fitting normalizing GP
on unnormalized y should yield identical results
"""
y_mean = y.mean(0)
y_norm = y - y_mean
for kernel in kernels:
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_mean + y_pred
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
""" Test that GPR can deal with multi-dimensional target values"""
y_2d = np.vstack((y, y * 2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
""" Test that GPR can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 50 random hyperparameters
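# (GaussianProcessRegressor expects any custom optimizer to have the
# signature optimizer(obj_func, initial_theta, bounds) and to return the
# best theta found together with the objective value at that theta.)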
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_duplicate_input():
""" Test GPR can handle two different output-values for the same input. """
for kernel in kernels:
gpr_equal_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
| bsd-3-clause |
mujiansu/jieba | test/extract_topic.py | 65 | 1463 | import sys
sys.path.append("../")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import decomposition
import jieba
import time
import glob
import sys
import os
import random
if len(sys.argv)<2:
print("usage: extract_topic.py directory [n_topic] [n_top_words]")
sys.exit(0)
n_topic = 10
n_top_words = 25
if len(sys.argv)>2:
n_topic = int(sys.argv[2])
if len(sys.argv)>3:
n_top_words = int(sys.argv[3])
count_vect = CountVectorizer()
docs = []
pattern = os.path.join(sys.argv[1],"*.txt")
print("read "+pattern)
for f_name in glob.glob(pattern):
with open(f_name) as f:
print("read file:", f_name)
for line in f: #one line as a document
words = " ".join(jieba.cut(line))
docs.append(words)
random.shuffle(docs)
print("read done.")
print("transform")
counts = count_vect.fit_transform(docs)
tfidf = TfidfTransformer().fit_transform(counts)
print(tfidf.shape)
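# NMF factorises the (documents x terms) TF-IDF matrix into a low-rank
# (documents x topics) times (topics x terms) product; the rows of
# nmf.components_ printed below hold the per-topic term weights.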
t0 = time.time()
print("training...")
nmf = decomposition.NMF(n_components=n_topic).fit(tfidf)
print("done in %0.3fs." % (time.time() - t0))
# Inverse the vectorizer vocabulary to be able
feature_names = count_vect.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print("")
| mit |
bertrand-l/LearnML | examples/iris-naive_bayes.py | 1 | 1506 | #!/usr/bin/env python
"""
Naive Bayes classification on Anderson's iris data.
"""
from __future__ import division, print_function
import numpy as np
import learnml as ml
import learnml.datasets as datasets
import learnml.cross_validation as cv
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
if __name__ == '__main__':
np.random.seed(0)
# load the data
iris = datasets.load_iris()
print(iris)
X, y = iris
# split into a training and a testing set
X_learn, y_learn, X_test, y_test = cv.split_data(X, y, frac=3/4)
# train the (Gaussian) naive Bayes classifier
naive = ml.NaiveBayesGaussian()
naive.learn(X_learn, y_learn)
print(naive)
# make predictions on the testing set
y_hat = naive.predict(X_test)
print("Incorrect predictions on the testing set: {0:.2f} %"
.format(100. * (y_hat != y_test).sum() / len(y_test)))
if plt is not None:
# plot learning data
colord = {}
for c in naive.classes:
i = y_learn == c
lines, = plt.plot(X_learn[i, 2], X_learn[i, 3], 'x', label=c)
colord[c] = lines.get_color()
# plot predictions
for c in naive.classes:
i = y_hat == c
lines, = plt.plot(X_test[i, 2], X_test[i, 3], 'o', color=colord[c])
plt.legend(loc=2)
plt.xlabel('Petal length (cm)')
plt.ylabel('Petal width (cm)')
plt.savefig('iris-naive_bayes.pdf')
plt.show()
| bsd-3-clause |
HTAustin/classifying-text | bow_validate_tfidf.py | 3 | 1768 | #!/usr/bin/env python
# improved BOW validation script
# changes: leave stopwords in, use TF-IDF vectorizer, removed converting vectorizer output to np.array
import os
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression as LR
from sklearn.metrics import roc_auc_score as AUC
from KaggleWord2VecUtility import KaggleWord2VecUtility
#
data_file = 'data/labeledTrainData.tsv'
data = pd.read_csv( data_file, header = 0, delimiter= "\t", quoting = 3 )
train_i, test_i = train_test_split( np.arange( len( data )), train_size = 0.8, random_state = 44 )
train = data.ix[train_i]
test = data.ix[test_i]
#
print "Parsing train reviews..."
clean_train_reviews = []
for review in train['review']:
clean_train_reviews.append( " ".join( KaggleWord2VecUtility.review_to_wordlist( review )))
print "Parsing test reviews..."
clean_test_reviews = []
for review in test['review']:
clean_test_reviews.append( " ".join( KaggleWord2VecUtility.review_to_wordlist( review )))
#
print "Vectorizing..."
vectorizer = TfidfVectorizer( max_features = 40000, ngram_range = ( 1, 3 ),
sublinear_tf = True )
train_data_features = vectorizer.fit_transform( clean_train_reviews )
test_data_features = vectorizer.transform( clean_test_reviews )
# let's define a helper function
def train_and_eval_auc( model, train_x, train_y, test_x, test_y ):
model.fit( train_x, train_y )
p = model.predict_proba( test_x )
auc = AUC( test_y, p[:,1] )
return auc
#
lr = LR()
auc = train_and_eval_auc( lr, train_data_features, train["sentiment"], \
test_data_features, test["sentiment"].values )
print "logistic regression AUC:", auc
| bsd-2-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/linear_model/ridge.py | 6 | 47113 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
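# A minimal sketch of the closed form used above, assuming dense X and a single
# scalar alpha (illustration only): solve the regularized normal equations
# (X^T X + alpha*Id) w = X^T y directly with numpy instead of forming an inverse.
def _normal_equations_sketch(X, y, alpha):
    n_features = X.shape[1]
    A = np.dot(X.T, X) + alpha * np.eye(n_features)
    return np.linalg.solve(A, np.dot(X.T, y))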
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
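# Sketch of the SVD identity used above, assuming 1-D y and a single scalar
# alpha (illustration only): with X = U diag(s) V^T, the ridge solution is
# w = V diag(s / (s**2 + alpha)) U^T y.
def _svd_ridge_sketch(X, y, alpha):
    U, s, Vt = linalg.svd(X, full_matrices=False)
    d = s / (s ** 2 + alpha)
    return np.dot(Vt.T, d * np.dot(U.T, y))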
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
iteration performed by the solver.
return_intercept : boolean, default False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model.center_data before your regression.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
The actual number of iteration performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or array, shape = [n_targets]
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
"""
if return_intercept and sparse.issparse(X) and solver != 'sag':
if solver != 'auto':
warnings.warn("In Ridge, only 'sag' solver can currently fit the "
"intercept when X is sparse. Solver has been "
"automatically changed into 'sag'.")
solver = 'sag'
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ))
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
start = {'coef': np.zeros(n_features + int(return_intercept))}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
start)
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
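# A minimal usage sketch for ridge_regression; the toy arrays are assumptions
# made only for illustration.
def _ridge_regression_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    # returns only the coefficients (no intercept, see Notes above)
    return ridge_regression(X, y, alpha=1.0, solver='cholesky')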
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
# temporary fix for fitting the intercept with sparse data using 'sag'
if sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=True)
self.intercept_ += y_mean
else:
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
The last four solvers support both dense and sparse data. However,
only 'sag' supports sparse input when `fit_intercept` is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
.. versionadded:: 0.17
*random_state* to support Stochastic Average Gradient.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
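# A small usage sketch for RidgeClassifier on toy binary data (the data and the
# chosen alpha are assumptions made only for illustration).
def _ridge_classifier_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = (X[:, 0] + 0.1 * rng.randn(30) > 0).astype(int)
    clf = RidgeClassifier(alpha=1.0).fit(X, y)
    return clf.classes_, clf.predict(X[:5])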
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
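# A sketch of the leave-one-out identity stated in the class docstring
# (looe = c / diag(G)), checked against explicit refits; dense X, 1-D y, a
# scalar alpha and no intercept are assumptions made only for illustration.
def _loo_identity_sketch(X, y, alpha):
    n_samples = X.shape[0]
    G = np.linalg.inv(np.dot(X, X.T) + alpha * np.eye(n_samples))
    looe_fast = np.dot(G, y) / np.diag(G)
    looe_naive = np.empty(n_samples)
    for i in range(n_samples):
        mask = np.arange(n_samples) != i
        G_i = np.linalg.inv(np.dot(X[mask], X[mask].T)
                            + alpha * np.eye(n_samples - 1))
        w = np.dot(X[mask].T, np.dot(G_i, y[mask]))
        looe_naive[i] = y[i] - np.dot(X[i], w)
    return looe_fast, looe_naive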
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used, else :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features and X is not a sparse
matrix, otherwise use 'eigen'
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
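# A minimal usage sketch for RidgeCV with stored leave-one-out values; the toy
# regression data are assumptions made only for illustration.
def _ridge_cv_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = np.dot(X, [1.0, -2.0, 0.5]) + 0.1 * rng.randn(50)
    reg = RidgeCV(alphas=(0.1, 1.0, 10.0), store_cv_values=True).fit(X, y)
    return reg.alpha_, reg.cv_values_.shape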
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
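# A small usage sketch for RidgeClassifierCV with balanced class weights; the
# toy data are assumptions made only for illustration.
def _ridge_classifier_cv_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(40, 4)
    y = (X[:, 0] - X[:, 1] > 0).astype(int)
    clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0),
                            class_weight='balanced').fit(X, y)
    return clf.alpha_, clf.score(X, y)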
| mit |
soxofaan/luigi | examples/pyspark_wc.py | 17 | 3388 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.contrib.s3 import S3Target
from luigi.contrib.spark import SparkSubmitTask, PySparkTask
class InlinePySparkWordCount(PySparkTask):
"""
This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
# py-packages: numpy, pandas
"""
driver_memory = '2g'
executor_memory = '3g'
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
def main(self, sc, *args):
sc.textFile(self.input().path) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(self.output().path)
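# A hedged sketch of triggering the inline task programmatically with luigi's
# local scheduler; a reachable Spark installation and the S3 targets above are
# assumed.
def run_inline_wordcount_sketch():
    return luigi.build([InlinePySparkWordCount()], local_scheduler=True)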
class PySparkWordCount(SparkSubmitTask):
"""
This task is the same as :py:class:`InlinePySparkWordCount` above but uses
an external python driver file specified in :py:meth:`app`
It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
deploy-mode: client
"""
driver_memory = '2g'
executor_memory = '3g'
total_executor_cores = luigi.IntParameter(default=100, significant=False)
name = "PySpark Word Count"
app = 'wordcount.py'
def app_options(self):
# These are passed to the Spark main args in the defined order.
return [self.input().path, self.output().path]
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
'''
// Corresponding example Spark Job, running Word count with Spark's Python API
// This file would have to be saved into wordcount.py
import sys
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext()
sc.textFile(sys.argv[1]) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(sys.argv[2])
'''
| apache-2.0 |
alisidd/tensorflow | tensorflow/examples/learn/text_classification_character_rnn.py | 61 | 3350 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using recurrent neural networks over characters for DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
def char_rnn_model(features, target):
"""Character level recurrent neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.one_hot(features, 256, 1, 0)
byte_list = tf.unstack(byte_list, axis=1)
cell = tf.contrib.rnn.GRUCell(HIDDEN_SIZE)
_, encoding = tf.contrib.rnn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_rnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
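# Hypothetical command-line usage; the flag parsed above switches to fake data:
#   python text_classification_character_rnn.py --test_with_fake_data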
| apache-2.0 |
bendalab/thunderfish | thunderfish/pulses.py | 3 | 80646 | """
Extract and cluster EOD waveforms of pulse-type electric fish.
## Main function
- `extract_pulsefish()`: checks for pulse-type fish based on the EOD amplitude and shape.
"""
import os
import numpy as np
from scipy import stats
from scipy.interpolate import interp1d
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import DBSCAN
from sklearn.mixture import BayesianGaussianMixture
from sklearn.metrics import pairwise_distances
from .eventdetection import detect_peaks, median_std_threshold
from .pulseplots import *
import warnings
def warn(*args, **kwargs):
"""
Ignore all warnings.
"""
pass
warnings.warn = warn
try:
from numba import jit
except ImportError:
def jit(*args, **kwargs):
def decorator_jit(func):
return func
return decorator_jit
# upgrade numpy functions for backwards compatibility:
if not hasattr(np, 'isin'):
np.isin = np.in1d
def unique_counts(ar):
""" Find the unique elements of an array and their counts, ignoring shape.
The code is condensed from numpy version 1.17.0.
Parameters
----------
ar : numpy array
Input array
Returns
-------
unique_values : numpy array
Unique values in array ar.
unique_counts : numpy array
Number of instances for each unique value in ar.
"""
try:
return np.unique(ar, return_counts=True)
except TypeError:
ar = np.asanyarray(ar).flatten()
ar.sort()
mask = np.empty(ar.shape, dtype=np.bool_)
mask[:1] = True
mask[1:] = ar[1:] != ar[:-1]
idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
return ar[mask], np.diff(idx)
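# A tiny usage sketch for unique_counts; the input values are an assumption
# made only for illustration.
def _unique_counts_example():
    values, counts = unique_counts(np.array([1, 1, 2, 5, 5, 5]))
    return values, counts  # -> (array([1, 2, 5]), array([2, 1, 3]))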
###################################################################################
def extract_pulsefish(data, samplerate, width_factor_shape=3, width_factor_wave=8,
width_factor_display=4, verbose=0, plot_level=0, save_plots=False,
save_path='', ftype='png', return_data=[]):
""" Extract and cluster pulse-type fish EODs from single channel data.
Takes recording data containing an unknown number of pulsefish and extracts the mean
EOD and EOD timepoints for each fish present in the recording.
Parameters
----------
data: 1-D array of float
The data to be analysed.
samplerate: float
Sampling rate of the data in Hertz.
width_factor_shape : int or float (optional)
Width multiplier used for EOD shape analysis.
EOD snippets are extracted based on width between the
peak and trough multiplied by the width factor.
width_factor_wave : int or float (optional)
Width multiplier used for wavefish detection.
width_factor_display : int or float (optional)
Width multiplier used for EOD mean extraction and display.
verbose : int (optional)
Verbosity level.
plot_level : int (optional)
Similar to verbosity levels, but with plots.
Only set to > 0 for debugging purposes.
save_plots : bool (optional)
Set to True to save the plots created by plot_level.
save_path: string (optional)
Path for saving plots.
ftype : string (optional)
Define the filetype to save the plots in if save_plots is set to True.
Options are: 'png', 'jpg', 'svg' ...
return_data : list of strings (optional)
Specify data that should be logged and returned in a dictionary. Each clustering
step has a specific keyword that results in adding different variables to the log dictionary.
Optional keys for return_data and the resulting additional key-value pairs to the log dictionary are:
- 'all_eod_times':
- 'all_times': list of two lists of floats.
All peak (`all_times[0]`) and trough times (`all_times[1]`) extracted
by the peak detection algorithm. Times are given in seconds.
- 'eod_troughtimes': list of 1D arrays.
The timepoints in seconds of each unique extracted EOD cluster,
where each 1D array encodes one cluster.
- 'peak_detection':
- "data": 1D numpy array of floats.
Quadratically interpolated data which was used for peak detection.
- "interp_f": float.
Interpolation factor of raw data.
- "peaks_1": 1D numpy array of ints.
Peak indices on interpolated data after first peak detection step.
- "troughs_1": 1D numpy array of ints.
Trough indices on interpolated data after first peak detection step.
- "peaks_2": 1D numpy array of ints.
Peak indices on interpolated data after second peak detection step.
- "troughs_2": 1D numpy array of ints.
Trough indices on interpolated data after second peak detection step.
- "peaks_3": 1D numpy array of ints.
Peak indices on interpolated data after third peak detection step.
- "troughs_3": 1D numpy array of ints.
Trough indices on interpolated data after third peak detection step.
- "peaks_4": 1D numpy array of ints.
Peak indices on interpolated data after fourth peak detection step.
- "troughs_4": 1D numpy array of ints.
Trough indices on interpolated data after fourth peak detection step.
- 'all_cluster_steps':
- 'samplerate': float.
Samplerate of interpolated data.
- 'EOD_widths': list of three 1D numpy arrays.
The first list entry gives the unique labels of all width clusters
as a list of ints.
The second list entry gives the width values for each EOD in samples
as a 1D numpy array of ints.
The third list entry gives the width labels for each EOD
as a 1D numpy array of ints.
- 'EOD_heights': nested lists (2 layers) of three 1D numpy arrays.
The first list entry gives the unique labels of all height clusters
as a list of ints for each width cluster.
The second list entry gives the height values for each EOD
as a 1D numpy array of floats for each width cluster.
The third list entry gives the height labels for each EOD
as a 1D numpy array of ints for each width cluster.
- 'EOD_shapes': nested lists (3 layers) of three 1D numpy arrays
The first list entry gives the raw EOD snippets as a 2D numpy array
for each height cluster in a width cluster.
The second list entry gives the snippet PCA values for each EOD
as a 2D numpy array of floats for each height cluster in a width cluster.
The third list entry gives the shape labels for each EOD as a 1D numpy array
of ints for each height cluster in a width cluster.
- 'discarding_masks': Nested lists (two layers) of 1D numpy arrays.
The masks of EODs that are discarded by the discarding step of the algorithm.
The masks are 1D boolean arrays where instances that are set to True are
discarded by the algorithm. Discarding masks are saved in nested lists
that represent the width and height clusters.
- 'merge_masks': Nested lists (two layers) of 2D numpy arrays.
The masks of EODs that are discarded by the merging step of the algorithm.
The masks are 2D boolean arrays where for each sample point `i` either
`merge_mask[i,0]` or `merge_mask[i,1]` is set to True. Here, merge_mask[:,0]
represents the peak-centered clusters and `merge_mask[:,1]` represents the
trough-centered clusters. Merge masks are saved in nested lists that
represent the width and height clusters.
- 'BGM_width':
- 'BGM_width': dictionary
- 'x': 1D numpy array of floats.
BGM input values (in this case the EOD widths),
- 'use_log': boolean.
True if the z-scored logarithm of the data was used as BGM input.
- 'BGM': list of three 1D numpy arrays.
The first instance are the weights of the Gaussian fits.
The second instance are the means of the Gaussian fits.
The third instance are the variances of the Gaussian fits.
- 'labels': 1D numpy array of ints.
Labels defined by BGM model (before merging based on merge factor).
- 'xlab': string.
Label for plot (defines the units of the BGM data).
- 'BGM_height':
This key adds a new dictionary for each width cluster.
- 'BGM_height_*n*' : dictionary, where *n* defines the width cluster as an int.
- 'x': 1D numpy array of floats.
BGM input values (in this case the EOD heights),
- 'use_log': boolean.
True if the z-scored logarithm of the data was used as BGM input.
- 'BGM': list of three 1D numpy arrays.
The first instance are the weights of the Gaussian fits.
The second instance are the means of the Gaussian fits.
The third instance are the variances of the Gaussian fits.
- 'labels': 1D numpy array of ints.
Labels defined by BGM model (before merging based on merge factor).
- 'xlab': string.
Label for plot (defines the units of the BGM data).
- 'snippet_clusters':
This key adds a new dictionary for each height cluster.
- 'snippet_clusters*_n_m_p*' : dictionary, where *n* defines the width cluster
(int), *m* defines the height cluster (int) and *p* defines shape clustering
on peak or trough centered EOD snippets (string: 'peak' or 'trough').
- 'raw_snippets': 2D numpy array (nsamples, nfeatures).
Raw EOD snippets.
- 'snippets': 2D numpy array.
Normalized EOD snippets.
- 'features': 2D numpy array.(nsamples, nfeatures)
PCA values for each normalized EOD snippet.
- 'clusters': 1D numpy array of ints.
Cluster labels.
- 'samplerate': float.
Samplerate of snippets.
- 'eod_deletion':
This key adds two dictionaries for each (peak centered) shape cluster,
where *cluster* (int) is the unique shape cluster label.
- 'mask_*cluster*' : list of four booleans.
The mask for each cluster discarding step.
The first instance represents the artefact masks, where artefacts
are set to True.
The second instance represents the unreliable cluster masks,
where unreliable clusters are set to True.
The third instance represents the wavefish masks, where wavefish
are set to True.
The fourth instance represents the sidepeak masks, where sidepeaks
are set to True.
- 'vals_*cluster*' : list of lists.
All variables that are used for each cluster deletion step.
The first instance is a list of two 1D numpy arrays: the mean EOD and
the FFT of that mean EOD.
The second instance is a 1D numpy array with all EOD width to ISI ratios.
The third instance is a list with three entries:
The first entry is a 1D numpy array with a zoomed-out version of the mean EOD.
The second entry is a list of two 1D numpy arrays that define the peak
and trough indices of the zoomed out mean EOD.
The third entry contains a list of two values that represent the
peak-trough pair in the zoomed out mean EOD with the largest height
difference.
- 'samplerate' : float.
EOD snippet samplerate.
- 'masks':
- 'masks' : 2D numpy array (4,N).
Each row contains masks for each EOD detected by the EOD peakdetection step.
The first row defines the artefact masks, the second row defines the
unreliable EOD masks,
the third row defines the wavefish masks and the fourth row defines
the sidepeak masks.
- 'moving_fish':
- 'moving_fish': dictionary.
- 'w' : list of floats.
Median width for each width cluster that the moving fish algorithm is
computed on (in seconds).
- 'T' : list of floats.
Length of the analyzed recording for each width cluster (in seconds).
- 'dt' : list of floats.
Sliding window size (in seconds) for each width cluster.
- 'clusters' : list of 1D numpy int arrays.
Cluster labels for each EOD cluster in a width cluster.
- 't' : list of 1D numpy float arrays.
EOD emission times for each EOD in a width cluster.
- 'fishcount' : list of lists.
Sliding window timepoints and fishcounts for each width cluster.
- 'ignore_steps' : list of 1D int arrays.
Mask for fishcounts that were ignored (ignored if True) in the
moving_fish analysis.
Returns
-------
mean_eods: list of 2D arrays (3, eod_length)
The average EOD for each detected fish. First column is time in seconds,
second column the mean eod, third column the standard error.
eod_times: list of 1D arrays
For each detected fish the times of EOD peaks or troughs in seconds.
Use these timepoints for EOD averaging.
eod_peaktimes: list of 1D arrays
For each detected fish the times of EOD peaks in seconds.
zoom_window: tuple of floats
Start and endtime of suggested window for plotting EOD timepoints.
log_dict: dictionary
Dictionary with logged variables, where variables to log are specified
by `return_data`.
"""
if verbose > 0:
print('')
if verbose > 1:
print(70*'#')
print('##### extract_pulsefish', 46*'#')
if (save_plots and plot_level>0 and save_path):
# create folder to save things in.
if not os.path.exists(save_path):
os.makedirs(save_path)
else:
save_path = ''
mean_eods, eod_times, eod_peaktimes, zoom_window = [], [], [], []
log_dict = {}
# extract peaks and interpolated data
x_peak, x_trough, eod_heights, eod_widths, i_samplerate, i_data, interp_f, pd_log_dict = \
extract_eod_times(data, samplerate,
np.max([width_factor_shape, width_factor_display, width_factor_wave]),
verbose=verbose-1, return_data=return_data, save_path=save_path)
if len(x_peak) > 0:
# cluster
clusters, x_merge, c_log_dict = cluster(x_peak, x_trough, eod_heights, eod_widths,
i_data, i_samplerate, interp_f,
width_factor_shape, width_factor_wave,
verbose=verbose-1, plot_level=plot_level-1,
save_plots=save_plots, save_path=save_path,
ftype=ftype, return_data=return_data)
# extract mean eods and times
mean_eods, eod_times, eod_peaktimes, eod_troughtimes, cluster_labels = \
extract_means(i_data, x_merge, x_peak, x_trough, eod_widths, clusters,
i_samplerate, width_factor_display, verbose=verbose-1)
# determine clipped clusters (save them, but ignore in other steps)
clusters, clipped_eods, clipped_times, clipped_peaktimes, clipped_troughtimes = \
find_clipped_clusters(clusters, mean_eods, eod_times, eod_peaktimes,
eod_troughtimes, cluster_labels, width_factor_display,
verbose=verbose-1)
# delete the moving fish
clusters, zoom_window, mf_log_dict = \
delete_moving_fish(clusters, x_merge/i_samplerate, len(data)/samplerate,
eod_heights, eod_widths/i_samplerate, i_samplerate,
verbose=verbose-1, plot_level=plot_level-1, save_plot=save_plots,
save_path=save_path, ftype=ftype, return_data=return_data)
if 'moving_fish' in return_data:
log_dict['moving_fish'] = mf_log_dict
clusters = remove_sparse_detections(clusters, eod_widths, i_samplerate,
len(data)/samplerate, verbose=verbose-1)
# extract mean eods
mean_eods, eod_times, eod_peaktimes, eod_troughtimes, cluster_labels = \
extract_means(i_data, x_merge, x_peak, x_trough, eod_widths,
clusters, i_samplerate, width_factor_display, verbose=verbose-1)
mean_eods.extend(clipped_eods)
eod_times.extend(clipped_times)
eod_peaktimes.extend(clipped_peaktimes)
eod_troughtimes.extend(clipped_troughtimes)
if plot_level > 0:
plot_all(data, eod_peaktimes, eod_troughtimes, samplerate, mean_eods)
if save_plots:
plt.savefig('%sextract_pulsefish_results.%s'%(save_path, ftype))
if save_plots:
plt.close('all')
if 'all_eod_times' in return_data:
log_dict['all_times'] = [x_peak/i_samplerate, x_trough/i_samplerate]
log_dict['eod_troughtimes'] = eod_troughtimes
log_dict.update(pd_log_dict)
log_dict.update(c_log_dict)
log_dict['samplerate'] = i_samplerate
if plot_level > 0:
# reset font family for the main thunderfish plot
rcParams['font.family'] = 'sans-serif'
return mean_eods, eod_times, eod_peaktimes, zoom_window, log_dict
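# --- Illustrative usage sketch (not part of the original algorithm) ---
# A hedged example of how extract_pulsefish() might be called. Here `data` is
# assumed to be a 1-D numpy array holding a single-channel recording and
# `samplerate` its sampling rate in Hz; in practice both would come from a
# data loader. All keyword arguments are assumed to have defaults and are left untouched.
def _example_extract_pulsefish(data, samplerate):
    mean_eods, eod_times, eod_peaktimes, zoom_window, log_dict = \
        extract_pulsefish(data, samplerate)
    # one mean EOD waveform and one array of EOD times per detected fish:
    return list(zip(mean_eods, eod_times))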
def extract_eod_times(data, samplerate, width_factor, interp_freq=500000,
max_peakwidth=0.01, min_peakwidth=None, verbose=0,
return_data=[], save_path=''):
""" Extract peaks from data which are potentially EODs.
Parameters
----------
data: 1-D array of float
The data to be analysed.
samplerate: int or float
Sampling rate of the data
width_factor: int or float
Factor for extracting EOD shapes.
Only EODs are extracted that can fully be analysed with this width.
interp_freq: int or float (optional)
Desired resolution in Hz. Data will be interpolated to match this resolution.
max_peakwidth: float (optional)
Maximum width for peak detection in seconds.
min_peakwidth: int or None (optional)
Minimum width for peak detection in seconds.
If None, the minimum width is determined from the recording data
resolution (2/samplerate).
verbose : int (optional)
Verbosity level.
return_data : list of strings (optional)
Keys that specify data to be logged. If 'peak_detection' is in `return_data`,
data of this function is logged (see extract_pulsefish()).
save_path : string (optional)
Path to save data to. Only important if you wish to save data (save_data==True).
Returns
-------
x_peak: array of ints
Indices of EOD peaks in data.
x_trough: array of ints
Indices of EOD troughs in data. There is one x_trough for each x_peak.
eod_heights: array of floats
EOD heights for each x_peak.
eod_widths: array of ints
EOD widths for each x_peak (in samples).
samplerate: int or float
New samplerate (after interpolation).
data: 1-D array of floats
Interpolated data.
interpolation_factor: float
Factor used for interpolation.
peak_detection_result : dictionary
Key value pairs of logged data. Data to be logged is specified by return_data.
"""
peak_detection_result = {}
# standard deviation of data in small snippets:
threshold = median_std_threshold(data, samplerate) # TODO pass parameters
try:
interp_f = int(interp_freq/samplerate)
f = interp1d(range(len(data)), data, kind='quadratic')
data = f(np.arange(0, len(data)-1, 1/interp_f))
except MemoryError:
interp_f = 1
orig_x_peaks, orig_x_troughs = detect_peaks(data, threshold)
orig_x_peaks = orig_x_peaks.astype('int')
orig_x_troughs = orig_x_troughs.astype('int')
if verbose>0:
print('Peaks extracted: %5i'%(len(orig_x_peaks)))
if len(orig_x_peaks)<2 or len(orig_x_troughs)<2 or len(orig_x_peaks)>samplerate:
if verbose>0:
print('No peaks detected.')
return [], [], [], [], samplerate*interp_f, data, interp_f, peak_detection_result
else:
if min_peakwidth is None:
min_peakwidth = interp_f*2
else:
min_peakwidth = min_peakwidth*interp_freq
peaks, troughs, heights, widths, apeaks, atroughs, aheights, awidths = \
detect_eod_peaks(orig_x_peaks, orig_x_troughs, data, max_peakwidth*interp_freq,
min_peakwidth, verbose=verbose-1)
x_peaks, x_troughs, eod_heights, eod_widths = \
discard_connecting_eods(peaks, troughs, heights, widths, verbose=verbose-1)
if 'peak_detection' in return_data:
peak_detection_result = {"data": data,
"interp_f": interp_f,
"peaks_1": orig_x_peaks,
"troughs_1": orig_x_troughs,
"peaks_2": apeaks,
"troughs_2": atroughs,
"peaks_3": peaks,
"troughs_3": troughs,
"peaks_4": x_peaks,
"troughs_4": x_troughs
}
# only keep peaks whose analysis window fits into the data,
# i.e. x plus/minus width_factor times the maximum EOD width stays within bounds.
if len(eod_widths)==0:
if verbose>0:
print('No EOD peaks detected.')
return [], [], [], [], samplerate*interp_f, data, interp_f, peak_detection_result
cut_idx = ((x_peaks + np.max(eod_widths)*width_factor < len(data)) &
           (x_troughs + np.max(eod_widths)*width_factor < len(data)) &
           (x_peaks - np.max(eod_widths)*width_factor > 0) &
           (x_troughs - np.max(eod_widths)*width_factor > 0))
if verbose>0:
print('Remaining peaks after EOD extraction: %5i'%(len(cut_idx)))
if verbose>1:
print('Remaining peaks after deletion due to cutwidth: %5i'%(np.sum(cut_idx)))
print('')
return x_peaks[cut_idx], x_troughs[cut_idx], eod_heights[cut_idx], eod_widths[cut_idx], samplerate*interp_f, data, interp_f, peak_detection_result
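# --- Illustrative sketch (not part of the original algorithm) ---
# The boolean mask `cut_idx` above keeps only those peak/trough pairs whose
# analysis window (width_factor times the largest EOD width) fits completely
# inside the recording. A minimal numpy-only illustration with made-up values:
def _example_boundary_mask():
    import numpy as np
    x_peaks = np.array([5, 100, 995])    # hypothetical peak indices
    x_troughs = np.array([7, 103, 998])  # hypothetical trough indices
    eod_widths = np.array([3, 4, 3])     # hypothetical widths in samples
    width_factor, n = 3, 1000            # analysis width factor, data length
    margin = np.max(eod_widths)*width_factor
    cut_idx = ((x_peaks + margin < n) & (x_troughs + margin < n) &
               (x_peaks - margin > 0) & (x_troughs - margin > 0))
    return x_peaks[cut_idx]              # -> array([100])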
def detect_eod_peaks(main_event_positions, side_event_positions, data,
max_width=20, min_width=2, verbose=0):
""" Generate array of events that might be EODs of a pulse-type fish, using the location of peaks and troughs.
Parameters
----------
main_event_positions: array of int or float
Positions of the detected peaks in the data time series.
side_event_positions: array of int or float
Positions of the detected troughs in the data time series.
The complementary events to the main events.
data: array of float
The data in which the events were detected.
max_width : int
Maximum EOD width (in samples).
min_width : int
Minimum EOD width (in samples).
verbose : int
Verbosity level.
Returns
-------
x_peak: numpy array of ints
Peak indices.
x_trough: numpy array of ints
Trough indices.
heights: numpy array of floats
Peak heights (distance between peak and trough amplitude)
widths: numpy array of ints
Peak widths (distance between peak and trough indices)
"""
# determine if there is a peak or trough first (evaluates to 1 if there is a peak first):
mainfirst = int((min(main_event_positions[0], side_event_positions[0]) <
side_event_positions[0]))
# determine if there is a peak or trough last (evaluates to 1 if there is a peak last):
mainlast = int((max(main_event_positions[-1], side_event_positions[-1]) >
side_event_positions[-1]))
x_peak = main_event_positions[mainfirst:len(main_event_positions)-mainlast]
ind = np.arange(len(x_peak))
y = data[x_peak]
# find indices of troughs on the right and left side of peaks:
l_side_ind = ind
r_side_ind = l_side_ind + 1
# compute x values, distance to peak and amplitude of right troughs:
r_side_x = side_event_positions[r_side_ind]
r_distance = np.abs(r_side_x - x_peak)
r_side_y = data[r_side_x]
# compute x values, distance to peak and amplitude of left troughs:
l_side_x = side_event_positions[l_side_ind]
l_distance = np.abs(x_peak - l_side_x)
l_side_y = data[l_side_x]
# compute slope of lines connecting the peaks to the nearest troughs on the right and left:
l_slope = np.abs((y-l_side_y)/l_distance)
r_slope = np.abs((y-r_side_y)/r_distance)
# determine which trough to assign to the peak by taking either the steepest slope,
# or, when slopes are similar on both sides (within 25% difference), take the trough
# with the maximum height difference to the peak.
trough_idxs = np.argmax(np.vstack((np.abs(y-l_side_y), np.abs(y-r_side_y))), axis=0)
slope_idxs = (np.abs(l_slope-r_slope)/(0.5*l_slope+0.5*r_slope) > 0.25)
trough_idxs[slope_idxs] = np.argmax(np.array(np.vstack(np.array([l_slope[slope_idxs],
r_slope[slope_idxs]]))),
axis=0)
# absolute values are used in case, for example, troughs instead of peaks are the main events:
right_or_left = np.vstack([np.abs(trough_idxs-1), trough_idxs])
heights = np.sum(np.vstack([np.abs(y-l_side_y), np.abs(y-r_side_y)])*right_or_left, axis=0)
widths = np.sum(np.vstack([l_distance, r_distance])*right_or_left, axis=0)
x_trough = np.sum((x_peak + np.vstack([-l_distance, r_distance]))*right_or_left, axis=0)
keep_events = ((widths>min_width) & (widths<max_width))
if verbose>0:
print('Number of peaks after connecting to sidepeaks: %5i'%(len(x_peak[keep_events])))
return x_peak[keep_events], x_trough[keep_events], heights[keep_events], widths[keep_events], x_peak, x_trough, heights, widths
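# --- Illustrative sketch (not part of the original algorithm) ---
# Hedged illustration of the trough-assignment rule used above: by default the
# trough with the larger height difference to the peak is chosen, but when the
# slopes towards the left and right troughs differ by more than 25%, the
# steeper side wins. All numbers are made up.
def _example_trough_assignment(peak_y=1.0, left_y=-0.2, right_y=-0.6,
                               left_dist=10, right_dist=4):
    l_slope = abs((peak_y - left_y)/left_dist)
    r_slope = abs((peak_y - right_y)/right_dist)
    # default: the side with the larger height difference (0 = left, 1 = right)
    choice = int(abs(peak_y - right_y) > abs(peak_y - left_y))
    # override by slope when the two slopes clearly differ
    if abs(l_slope - r_slope)/(0.5*l_slope + 0.5*r_slope) > 0.25:
        choice = int(r_slope > l_slope)
    return 'right' if choice else 'left'   # -> 'right' for the defaults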
@jit(nopython=True)
def discard_connecting_eods(x_peak, x_trough, heights, widths, verbose=0):
""" If two detected EODs share the same closest trough, keep only the highest peak.
Parameters
----------
x_peak: list of ints
Indices of EOD peaks.
x_trough: list of ints
Indices of EOD troughs.
heights: list of floats
EOD heights.
widths: list of ints
EOD widths.
verbose : int (optional)
Verbosity level.
Returns
-------
x_peak, x_trough, heights, widths : lists of ints and floats
EOD location and features of the non-discarded EODs
"""
keep_idxs = np.ones(len(x_peak))
for tr in np.unique(x_trough):
if len(x_trough[x_trough==tr]) > 1:
slopes = heights[x_trough==tr]/widths[x_trough==tr]
if (np.max(slopes)!=np.min(slopes)) and \
(np.abs(np.max(slopes)-np.min(slopes))/(0.5*np.max(slopes)+0.5*np.min(slopes)) > 0.25):
keep_idxs[np.where(x_trough==tr)[0][np.argmin(heights[x_trough==tr]/widths[x_trough==tr])]] = 0
else:
keep_idxs[np.where(x_trough==tr)[0][np.argmin(heights[x_trough==tr])]] = 0
return x_peak[np.where(keep_idxs==1)[0]], x_trough[np.where(keep_idxs==1)[0]], heights[np.where(keep_idxs==1)[0]], widths[np.where(keep_idxs==1)[0]]
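# --- Illustrative sketch (not part of the original algorithm) ---
# When two detected peaks point to the same trough, only one of them can be a
# genuine EOD. A simplified version of the rule above with made-up values:
# keep the steeper peak if the slopes clearly differ, otherwise the higher one.
def _example_shared_trough():
    import numpy as np
    heights = np.array([1.0, 0.4])   # two peaks sharing one trough
    widths = np.array([5, 2])        # peak-trough distances in samples
    slopes = heights/widths          # 0.2 and 0.2
    if np.abs(slopes[0] - slopes[1])/(0.5*slopes[0] + 0.5*slopes[1]) > 0.25:
        keep = np.argmax(slopes)
    else:
        keep = np.argmax(heights)
    return keep                      # -> 0, the higher peak is kept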
def cluster(eod_xp, eod_xt, eod_heights, eod_widths, data, samplerate, interp_f,
width_factor_shape, width_factor_wave,
n_gaus_height=10, merge_threshold_height=0.1, n_gaus_width=3,
merge_threshold_width=0.5, minp=10,
verbose=0, plot_level=0, save_plots=False, save_path='', ftype='pdf',
return_data=[]):
""" Cluster EODs.
First cluster on EOD widths using a Bayesian Gaussian
Mixture (BGM) model, then cluster on EOD heights using a
BGM model. Lastly, cluster on EOD waveform with DBSCAN.
Clustering on EOD waveform is performed twice, once on
peak-centered EODs and once on trough-centered EODs.
Non-pulsetype EOD clusters are deleted, and clusters are
merged afterwards.
Parameters
----------
eod_xp : list of ints
Location of EOD peaks in indices.
eod_xt: list of ints
Locations of EOD troughs in indices.
eod_heights: list of floats
EOD heights.
eod_widths: list of ints
EOD widths in samples.
data: list of floats
Recording data.
samplerate : int or float
Sample rate of raw data.
interp_f: float
Interpolation factor used to obtain input data.
width_factor_shape : int or float
Multiplier for snippet extraction width. This factor is multiplied with the width
between the peak and trough of a single EOD.
width_factor_wave : int or float
Multiplier for wavefish extraction width.
n_gaus_height : int (optional)
Number of gaussians to use for the clustering based on EOD height.
merge_threshold_height : float (optional)
Threshold for merging clusters that are similar in height.
n_gaus_width : int (optional)
Number of gaussians to use for the clustering based on EOD width.
merge_threshold_width : float (optional)
Threshold for merging clusters that are similar in width.
minp : int (optional)
Minimum number of points for core clusters (DBSCAN).
verbose : int (optional)
Verbosity level.
plot_level : int (optional)
Similar to verbosity levels, but with plots.
Only set to > 0 for debugging purposes.
save_plots : bool (optional)
Set to True to save created plots.
save_path : string (optional)
Path to save plots to. Only used if save_plots==True.
ftype : string (optional)
Filetype to save plot images in.
return_data : list of strings (optional)
Keys that specify data to be logged. Keys that can be used to log data
in this function are: 'all_cluster_steps', 'BGM_width', 'BGM_height',
'snippet_clusters', 'eod_deletion' (see extract_pulsefish()).
Returns
-------
labels : list of ints
EOD cluster labels based on height and EOD waveform.
x_merge : list of ints
Locations of EODs in clusters.
saved_data : dictionary
Key value pairs of logged data. Data to be logged is specified by return_data.
"""
saved_data = {}
if plot_level>0 or 'all_cluster_steps' in return_data:
all_heightlabels = []
all_shapelabels = []
all_snippets = []
all_features = []
all_heights = []
all_unique_heightlabels = []
all_p_clusters = np.ones(len(eod_xp))*-1
all_t_clusters = np.ones(len(eod_xp))*-1
artefact_masks_p = np.ones(len(eod_xp), dtype=bool)
artefact_masks_t = np.ones(len(eod_xp), dtype=bool)
x_merge = np.ones(len(eod_xp))*-1
max_label_p = 0 # keep track of the labels so that no labels are overwritten
max_label_t = 0
# loop only over height clusters that are bigger than minp
# first cluster on width
width_labels, bgm_log_dict = BGM(1000*eod_widths/samplerate, merge_threshold_width,
n_gaus_width, use_log=False, verbose=verbose-1,
plot_level=plot_level-1, xlabel='width [ms]',
save_plot=save_plots, save_path=save_path,
save_name='width', ftype=ftype, return_data=return_data)
saved_data.update(bgm_log_dict)
if verbose>0:
print('Clusters generated based on EOD width:')
[print('N_{} = {:>4} h_{} = {:.4f}'.format(l, len(width_labels[width_labels==l]), l, np.mean(eod_widths[width_labels==l]))) for l in np.unique(width_labels)]
w_labels, w_counts = unique_counts(width_labels)
unique_width_labels = w_labels[w_counts>minp]
for wi, width_label in enumerate(unique_width_labels):
# select only features in one width cluster at a time
w_eod_widths = eod_widths[width_labels==width_label]
w_eod_heights = eod_heights[width_labels==width_label]
w_eod_xp = eod_xp[width_labels==width_label]
w_eod_xt = eod_xt[width_labels==width_label]
wp_clusters = np.ones(len(w_eod_xp))*-1
wt_clusters = np.ones(len(w_eod_xp))*-1
wartefact_mask = np.ones(len(w_eod_xp))
# determine height labels
raw_p_snippets, p_snippets, p_features, p_bg_ratio = \
extract_snippet_features(data, w_eod_xp, w_eod_widths, w_eod_heights,
width_factor_shape)
raw_t_snippets, t_snippets, t_features, t_bg_ratio = \
extract_snippet_features(data, w_eod_xt, w_eod_widths, w_eod_heights,
width_factor_shape)
height_labels, bgm_log_dict = \
BGM(w_eod_heights,
min(merge_threshold_height, np.median(np.min(np.vstack([p_bg_ratio, t_bg_ratio]), axis=0))),
n_gaus_height, use_log=True, verbose=verbose-1, plot_level=plot_level-1,
xlabel='height [a.u.]', save_plot=save_plots, save_path=save_path,
save_name=('height_'+str(wi)), ftype=ftype, return_data=return_data)
saved_data.update(bgm_log_dict)
if verbose>0:
print('Clusters generated based on EOD height:')
[print('N_{} = {:>4} h_{} = {:.4f}'.format(l, len(height_labels[height_labels==l]), l, np.mean(w_eod_heights[height_labels==l]))) for l in np.unique(height_labels)]
h_labels, h_counts = unique_counts(height_labels)
unique_height_labels = h_labels[h_counts>minp]
if plot_level>0 or 'all_cluster_steps' in return_data:
all_heightlabels.append(height_labels)
all_heights.append(w_eod_heights)
all_unique_heightlabels.append(unique_height_labels)
shape_labels = []
cfeatures = []
csnippets = []
for hi, height_label in enumerate(unique_height_labels):
h_eod_widths = w_eod_widths[height_labels==height_label]
h_eod_heights = w_eod_heights[height_labels==height_label]
h_eod_xp = w_eod_xp[height_labels==height_label]
h_eod_xt = w_eod_xt[height_labels==height_label]
p_clusters = cluster_on_shape(p_features[height_labels==height_label],
p_bg_ratio, minp, verbose=0)
t_clusters = cluster_on_shape(t_features[height_labels==height_label],
t_bg_ratio, minp, verbose=0)
if plot_level>1:
plot_feature_extraction(raw_p_snippets[height_labels==height_label],
p_snippets[height_labels==height_label],
p_features[height_labels==height_label],
p_clusters, 1/samplerate, 0)
plt.savefig('%sDBSCAN_peak_w%i_h%i.%s'%(save_path, wi, hi, ftype))
plot_feature_extraction(raw_t_snippets[height_labels==height_label],
t_snippets[height_labels==height_label],
t_features[height_labels==height_label],
t_clusters, 1/samplerate, 1)
plt.savefig('%sDBSCAN_trough_w%i_h%i.%s'%(save_path, wi, hi, ftype))
if 'snippet_clusters' in return_data:
saved_data['snippet_clusters_%i_%i_peak'%(width_label, height_label)] = {
'raw_snippets':raw_p_snippets[height_labels==height_label],
'snippets':p_snippets[height_labels==height_label],
'features':p_features[height_labels==height_label],
'clusters':p_clusters,
'samplerate':samplerate}
saved_data['snippet_clusters_%i_%i_trough'%(width_label, height_label)] = {
'raw_snippets':raw_t_snippets[height_labels==height_label],
'snippets':t_snippets[height_labels==height_label],
'features':t_features[height_labels==height_label],
'clusters':t_clusters,
'samplerate':samplerate}
if plot_level>0 or 'all_cluster_steps' in return_data:
shape_labels.append([p_clusters, t_clusters])
cfeatures.append([p_features[height_labels==height_label],
t_features[height_labels==height_label]])
csnippets.append([p_snippets[height_labels==height_label],
t_snippets[height_labels==height_label]])
p_clusters[p_clusters==-1] = -max_label_p - 1
wp_clusters[height_labels==height_label] = p_clusters + max_label_p
max_label_p = max(np.max(wp_clusters), np.max(all_p_clusters)) + 1
t_clusters[t_clusters==-1] = -max_label_t - 1
wt_clusters[height_labels==height_label] = t_clusters + max_label_t
max_label_t = max(np.max(wt_clusters), np.max(all_t_clusters)) + 1
if verbose > 0:
if np.max(wp_clusters) == -1:
print('No EOD peaks in width cluster %i'%width_label)
elif len(np.unique(wp_clusters[wp_clusters!=-1]))>1:
print('%i different EOD peaks in width cluster %i'%(len(np.unique(wp_clusters[wp_clusters!=-1])), width_label))
if np.max(wt_clusters) == -1:
print('No EOD troughs in width cluster %i'%width_label)
elif len(np.unique(wt_clusters[wt_clusters!=-1]))>1:
print('%i different EOD troughs in width cluster %i'%(len(np.unique(wt_clusters[wt_clusters!=-1])), width_label))
if plot_level>0 or 'all_cluster_steps' in return_data:
all_shapelabels.append(shape_labels)
all_snippets.append(csnippets)
all_features.append(cfeatures)
# for each cluster, save the FFT and the label, so that features and masks
# are available per label and, e.g., the first artefact or wavefish cluster
# can be extracted later.
# remove artefacts here, based on the mean snippets ffts.
artefact_masks_p[width_labels==width_label], sdict = \
remove_artefacts(p_snippets, wp_clusters, interp_f, samplerate,
verbose=verbose-1, return_data=return_data)
saved_data.update(sdict)
artefact_masks_t[width_labels==width_label], _ = \
remove_artefacts(t_snippets, wt_clusters, interp_f, samplerate,
verbose=verbose-1, return_data=return_data)
# update maxlab so that no clusters are overwritten
all_p_clusters[width_labels==width_label] = wp_clusters
all_t_clusters[width_labels==width_label] = wt_clusters
# remove all non-reliable clusters
unreliable_fish_mask_p, saved_data = \
delete_unreliable_fish(all_p_clusters, eod_widths, eod_xp,
verbose=verbose-1, sdict=saved_data)
unreliable_fish_mask_t, _ = \
delete_unreliable_fish(all_t_clusters, eod_widths, eod_xt, verbose=verbose-1)
wave_mask_p, sidepeak_mask_p, saved_data = \
delete_wavefish_and_sidepeaks(data, all_p_clusters, eod_xp, eod_widths, interp_f,
width_factor_wave, verbose=verbose-1, sdict=saved_data)
wave_mask_t, sidepeak_mask_t, _ = \
delete_wavefish_and_sidepeaks(data, all_t_clusters, eod_xt, eod_widths, interp_f,
width_factor_wave, verbose=verbose-1)
og_clusters = [np.copy(all_p_clusters), np.copy(all_t_clusters)]
og_labels=np.copy(all_p_clusters+all_t_clusters)
# apply all deletion masks to the peak- and trough-centered clusters
all_p_clusters[(artefact_masks_p | unreliable_fish_mask_p | wave_mask_p | sidepeak_mask_p)] = -1
all_t_clusters[(artefact_masks_t | unreliable_fish_mask_t | wave_mask_t | sidepeak_mask_t)] = -1
# merge here.
all_clusters, x_merge, mask = merge_clusters(np.copy(all_p_clusters),
np.copy(all_t_clusters), eod_xp, eod_xt,
verbose=verbose-1)
if 'all_cluster_steps' in return_data or plot_level>0:
all_dmasks = []
all_mmasks = []
discarding_masks = \
np.vstack(((artefact_masks_p | unreliable_fish_mask_p | wave_mask_p | sidepeak_mask_p),
(artefact_masks_t | unreliable_fish_mask_t | wave_mask_t | sidepeak_mask_t)))
merge_mask = mask
# save the masks in the same formats as the snippets
for wi, (width_label, w_shape_label, heightlabels, unique_height_labels) in enumerate(zip(unique_width_labels, all_shapelabels, all_heightlabels, all_unique_heightlabels)):
w_dmasks = discarding_masks[:,width_labels==width_label]
w_mmasks = merge_mask[:,width_labels==width_label]
wd_2 = []
wm_2 = []
for hi, (height_label, h_shape_label) in enumerate(zip(unique_height_labels, w_shape_label)):
h_dmasks = w_dmasks[:,heightlabels==height_label]
h_mmasks = w_mmasks[:,heightlabels==height_label]
wd_2.append(h_dmasks)
wm_2.append(h_mmasks)
all_dmasks.append(wd_2)
all_mmasks.append(wm_2)
if plot_level>0:
plot_clustering(samplerate, [unique_width_labels, eod_widths, width_labels],
[all_unique_heightlabels, all_heights, all_heightlabels],
[all_snippets, all_features, all_shapelabels],
all_dmasks, all_mmasks)
if save_plots:
plt.savefig('%sclustering.%s'%(save_path, ftype))
if 'all_cluster_steps' in return_data:
saved_data = {'samplerate': samplerate,
'EOD_widths': [unique_width_labels, eod_widths, width_labels],
'EOD_heights': [all_unique_heightlabels, all_heights, all_heightlabels],
'EOD_shapes': [all_snippets, all_features, all_shapelabels],
'discarding_masks': all_dmasks,
'merge_masks': all_mmasks
}
if 'masks' in return_data:
saved_data = {'masks' : np.vstack(((artefact_masks_p & artefact_masks_t),
(unreliable_fish_mask_p & unreliable_fish_mask_t),
(wave_mask_p & wave_mask_t),
(sidepeak_mask_p & sidepeak_mask_t),
(all_p_clusters+all_t_clusters)))}
if verbose>0:
print('Clusters generated based on height, width and shape: ')
[print('N_{} = {:>4}'.format(int(l), len(all_clusters[all_clusters==l]))) for l in np.unique(all_clusters[all_clusters!=-1])]
return all_clusters, x_merge, saved_data
def BGM(x, merge_threshold=0.1, n_gaus=5, max_iter=200, n_init=5,
use_log=False, verbose=0, plot_level=0, xlabel='x [a.u.]',
save_plot=False, save_path='', save_name='', ftype='pdf', return_data=[]):
""" Use a Bayesian Gaussian Mixture Model to cluster one-dimensional data.
Additional steps are used to merge clusters that are closer than
`merge_threshold`. Broad gaussian fits that cover one or more other
gaussian fits are split by their intersections with the other
gaussians.
Parameters
----------
x : 1D numpy array
Features to compute clustering on.
merge_threshold : float (optional)
Ratio for merging nearby gaussians.
n_gaus: int (optional)
Maximum number of gaussians to fit on data.
max_iter : int (optional)
Maximum number of iterations for gaussian fit.
n_init : int (optional)
Number of initializations for the gaussian fit.
use_log: boolean (optional)
Set to True to compute the gaussian fit on the logarithm of x.
Can improve clustering on features with nonlinear relationships such as peak height.
verbose : int (optional)
Verbosity level.
plot_level : int (optional)
Similar to verbosity levels, but with plots.
Only set to > 0 for debugging purposes.
xlabel : string (optional)
Xlabel for displaying BGM plot.
save_plot : bool (optional)
Set to True to save created plot.
save_path : string (optional)
Path to location where data should be saved. Only used if save_plot==True.
save_name : string (optional)
Filename of the saved plot. Useful as usually multiple BGM models are generated.
ftype : string (optional)
Filetype of plot image if save_plots==True.
return_data : list of strings (optional)
Keys that specify data to be logged. Keys that can be used to log data
in this function are: 'BGM_width' and/or 'BGM_height' (see extract_pulsefish()).
Returns
-------
labels : 1D numpy array
Cluster labels for each sample in x.
bgm_dict : dictionary
Key value pairs of logged data. Data to be logged is specified by return_data.
"""
bgm_dict = {}
if len(np.unique(x))>n_gaus:
BGM_model = BayesianGaussianMixture(n_gaus, max_iter=max_iter, n_init=n_init)
if use_log:
labels = BGM_model.fit_predict(stats.zscore(np.log(x)).reshape(-1, 1))
else:
labels = BGM_model.fit_predict(stats.zscore(x).reshape(-1, 1))
else:
return np.zeros(len(x)), bgm_dict
if verbose>0:
if not BGM_model.converged_:
print('!!! Gaussian mixture did not converge !!!')
cur_labels = np.unique(labels)
# map labels to be increasing for increasing values for x
maxlab = len(np.unique(labels))
aso = np.argsort([np.median(x[labels==l]) for l in cur_labels]) + 100
for i, a in zip(cur_labels, aso):
labels[labels==i] = a
labels = labels - 100
# separate gaussian clusters that can be split by other clusters
splits = np.sort(np.copy(x))[1:][np.diff(labels[np.argsort(x)])!=0]
labels[:] = 0
for i, split in enumerate(splits):
labels[x>=split] = i+1
labels_before_merge = np.copy(labels)
# merge gaussian clusters that are closer than merge_threshold
labels = merge_gaussians(x, labels, merge_threshold)
if 'BGM_'+save_name.split('_')[0] in return_data or plot_level>0:
# sort the model attributes by the means of the Gaussians
means = [m[0] for m in BGM_model.means_]
weights = [w for w in BGM_model.weights_]
variances = [v[0][0] for v in BGM_model.covariances_]
weights = [w for _, w in sorted(zip(means, weights))]
variances = [v for _, v in sorted(zip(means, variances))]
means = sorted(means)
if plot_level>0:
plot_bgm(x, means, variances, weights, use_log, labels_before_merge,
labels, xlabel)
if save_plot:
plt.savefig('%sBGM_%s.%s'%(save_path, save_name, ftype))
if 'BGM_'+save_name.split('_')[0] in return_data:
bgm_dict['BGM_'+save_name] = {'x':x,
'use_log':use_log,
'BGM':[weights, means, variances],
'labels':labels_before_merge,
'xlab':xlabel}
return labels, bgm_dict
def merge_gaussians(x, labels, merge_threshold=0.1):
""" Merge all clusters which have medians which are near. Only works in 1D.
Parameters
----------
x : 1D array of ints or floats
Features used for clustering.
labels : 1D array of ints
Labels for each sample in x.
merge_threshold : float (optional)
Similarity threshold to merge clusters.
Returns
-------
labels : 1D array of ints
Merged labels for each sample in x.
"""
# compare all the means of the gaussians. If they are too close, merge them.
unique_labels = np.unique(labels[labels!=-1])
x_medians = [np.median(x[labels==l]) for l in unique_labels]
# fill a dict with the label mappings
mapping = {}
for label_1, x_m1 in zip(unique_labels, x_medians):
for label_2, x_m2 in zip(unique_labels, x_medians):
if label_1!=label_2:
if np.abs(np.diff([x_m1, x_m2]))/np.max([x_m1, x_m2]) < merge_threshold:
mapping[label_1] = label_2
# apply mapping
for map_key, map_value in mapping.items():
labels[labels==map_key] = map_value
return labels
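# --- Illustrative sketch (not part of the original algorithm) ---
# The merge criterion above compares two cluster medians relative to the larger
# one: |m1 - m2| / max(m1, m2) < merge_threshold. A small numeric example with
# made-up medians:
def _example_merge_criterion(m1=1.00, m2=1.08, merge_threshold=0.1):
    relative_difference = abs(m1 - m2)/max(m1, m2)
    return relative_difference < merge_threshold  # 0.074 < 0.1 -> merge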
def extract_snippet_features(data, eod_x, eod_widths, eod_heights, width_factor, n_pc=5):
""" Extract snippets from recording data, normalize them, and perform PCA.
Parameters
----------
data : 1D numpy array of floats
Recording data.
eod_x : 1D array of ints
Locations of EODs in indices.
eod_widths : 1D array of ints
EOD widths in samples.
eod_heights: 1D array of floats
EOD heights.
width_factor: int or float
Multiplier for extracting EOD snippets
n_pc : int (optional)
Number of PCs to use for PCA.
Returns
-------
raw_snippets : 2D numpy array (N, EOD_width)
Raw extracted EOD snippets.
snippets : 2D numpy array (N, EOD_width)
Normalized EOD snippets
features : 2D numpy array (N,n_pc)
PC values of EOD snippets
bg_ratio : 1D numpy array (N)
Ratio of the background activity slopes compared to EOD height.
"""
# extract snippets with corresponding width
width = width_factor*np.median(eod_widths)
raw_snippets = np.vstack([data[int(x-width):int(x+width)] for x in eod_x])
# subtract the slope and normalize the snippets
snippets, bg_ratio = subtract_slope(np.copy(raw_snippets), eod_heights)
snippets = StandardScaler().fit_transform(snippets.T).T
# scale so that the absolute integral = 1.
snippets = (snippets.T/np.sum(np.abs(snippets), axis=1)).T
# compute features for clustering on waveform
features = PCA(n_pc).fit(snippets).transform(snippets)
return raw_snippets, snippets, features, bg_ratio
def cluster_on_shape(features, bg_ratio, minp, percentile=80, max_epsilon=0.01,
slope_ratio_factor=4, min_cluster_fraction=0.01, verbose=0):
"""Separate EODs by their shape using DBSCAN.
Parameters
----------
features : 2D numpy array of floats (N, n_pc)
PCA features of each EOD in a recording.
bg_ratio : 1D array of floats
Ratio of background activity slope the EOD is superimposed on.
minp : int
Minimum number of points for core cluster (DBSCAN).
percentile : int (optional)
Percentile of KNN distribution, where K=minp, to use as epsilon for DBSCAN.
max_epsilon : float (optional)
Maximum epsilon to use for DBSCAN clustering. This is used to avoid adding
noisy clusters.
slope_ratio_factor : int or float (optional)
Influence of the slope-to-EOD ratio on the epsilon parameter.
A slope_ratio_factor of 4 means that slope-to-EOD ratios >1/4
start influencing epsilon.
min_cluster_fraction : float (optional)
Minimum fraction of all evaluated datapoints that can form a single cluster.
verbose : int (optional)
Verbosity level.
Returns
-------
labels : 1D array of ints
DBSCAN cluster labels for each feature vector (-1 for noise).
"""
# determine clustering threshold from data
minpc = max(minp, int(len(features)*min_cluster_fraction))
knn = np.sort(pairwise_distances(features, features), axis=0)[minpc]
eps = min(max(1, slope_ratio_factor*np.median(bg_ratio))*max_epsilon,
np.percentile(knn, percentile))
if verbose>1:
print('epsilon = %f'%eps)
print('Slope to EOD ratio = %f'%np.median(bg_ratio))
# cluster on EOD shape
return DBSCAN(eps=eps, min_samples=minpc).fit(features).labels_
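# --- Illustrative sketch (not part of the original algorithm) ---
# The DBSCAN epsilon above is data driven: the given percentile of the distance
# to the minpc-th nearest neighbour, capped by max_epsilon (relaxed when the
# background slope ratio is large). A numpy-only sketch with random features:
def _example_epsilon_selection(percentile=80, max_epsilon=0.01,
                               slope_ratio_factor=4, minpc=10):
    import numpy as np
    rng = np.random.default_rng(0)
    features = rng.normal(size=(200, 5))*0.003   # hypothetical PCA features
    bg_ratio = np.full(200, 0.1)                 # hypothetical slope ratios
    d = np.sqrt(((features[:, None, :] - features[None, :, :])**2).sum(-1))
    knn = np.sort(d, axis=0)[minpc]              # distance to minpc-th neighbour
    eps = min(max(1, slope_ratio_factor*np.median(bg_ratio))*max_epsilon,
              np.percentile(knn, percentile))
    return eps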
def subtract_slope(snippets, heights):
""" Subtract underlying slope from all EOD snippets.
Parameters
----------
snippets: 2-D numpy array
All EODs in a recording, stacked as snippets.
Shape = (number of EODs, EOD width)
heights: 1D numpy array
EOD heights.
Returns
-------
snippets: 2-D numpy array
EOD snippets with underlying slope subtracted.
bg_ratio : 1-D numpy array
EOD height/background activity height.
"""
left_y = snippets[:,0]
right_y = snippets[:,-1]
try:
slopes = np.linspace(left_y, right_y, snippets.shape[1])
except ValueError:
delta = (right_y - left_y)/snippets.shape[1]
slopes = np.arange(0, snippets.shape[1], dtype=snippets.dtype).reshape((-1,) + (1,) * np.ndim(delta))*delta + left_y
return snippets - slopes.T, np.abs(left_y-right_y)/heights
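# --- Illustrative sketch (not part of the original algorithm) ---
# subtract_slope() removes the straight line that connects the first and last
# sample of each snippet. For a single, made-up snippet this reduces to:
def _example_subtract_slope():
    import numpy as np
    snippet = np.array([0.1, 0.3, 1.0, -0.8, 0.2])   # made-up EOD snippet
    height = 1.8                                     # made-up EOD height
    baseline = np.linspace(snippet[0], snippet[-1], len(snippet))
    corrected = snippet - baseline
    bg_ratio = abs(snippet[0] - snippet[-1])/height  # background slope vs. EOD size
    return corrected, bg_ratio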
def remove_artefacts(all_snippets, clusters, int_f, samplerate, artefact_threshold=0.75,
verbose=0, return_data=[]):
""" Create a mask for EOD clusters that result from artefacts, based on power in low frequency spectrum.
Parameters
----------
all_snippets: 2D array
EOD snippets. Shape=(nEODs, EOD length)
clusters: list of ints
EOD cluster labels
int_f : float
Interpolation factor used for peak detection.
samplerate : int or float
Samplerate of original recording data.
artefact_threshold : float (optional)
Threshold that separates artefact from clean pulsefish clusters.
verbose : int (optional)
Verbosity level.
return_data : list of strings (optional)
Keys that specify data to be logged. The key that can be used to log data in this function is
'eod_deletion' (see extract_pulsefish()).
Returns
-------
mask: numpy array of booleans
Set to True for every EOD which is an artefact.
adict : dictionary
Key value pairs of logged data. Data to be logged is specified by return_data.
"""
adict = {}
mask = np.zeros(clusters.shape, dtype=bool)
for cluster in np.sort(np.unique(clusters[clusters>=0])):
snippets = all_snippets[clusters==cluster]
mean_eod = np.mean(snippets, axis=0)
mean_eod = mean_eod - np.mean(mean_eod)
cut_fft = int(len(np.fft.fft(mean_eod))/2)
low_frequency_ratio = np.sum(np.abs(np.fft.fft(mean_eod))[:int(cut_fft/(2*int_f))])/np.sum(np.abs(np.fft.fft(mean_eod))[:int(cut_fft)])
freqs = np.linspace(0, samplerate, cut_fft)
if low_frequency_ratio < artefact_threshold:
mask[clusters==cluster] = True
if verbose>0:
print('Deleting cluster %i, which has a low frequency ratio of %f'%(cluster,low_frequency_ratio))
if 'eod_deletion' in return_data:
adict['vals_'+str(int(cluster))] = [mean_eod, np.abs(np.fft.fft(mean_eod))[:int(cut_fft/int_f)]]
adict['mask_'+str(int(cluster))] = [any(mask[clusters==cluster])]
return mask, adict
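# --- Illustrative sketch (not part of the original algorithm) ---
# The artefact test above compares the summed amplitude spectrum of the mean
# EOD at low frequencies to the total over all positive frequencies; clean
# pulse EODs concentrate their power at low frequencies. The value of int_f
# below is arbitrary and only chosen for illustration.
def _example_low_frequency_ratio(int_f=2, artefact_threshold=0.75):
    import numpy as np
    t = np.linspace(-1, 1, 200)
    mean_eod = np.exp(-t**2/0.01)*np.sin(20*t)   # smooth, pulse-like shape
    mean_eod -= np.mean(mean_eod)
    spectrum = np.abs(np.fft.fft(mean_eod))
    cut_fft = len(spectrum)//2
    ratio = np.sum(spectrum[:int(cut_fft/(2*int_f))])/np.sum(spectrum[:cut_fft])
    return ratio >= artefact_threshold           # True -> keep, False -> artefact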
def delete_unreliable_fish(clusters, eod_widths, eod_x, verbose=0, sdict={}):
""" Create a mask for EOD clusters that are either mixed with noise or other fish, or wavefish.
This is the case when the ration between the EOD width and the ISI is too large.
Parameters
----------
clusters : list of ints
Cluster labels.
eod_widths : list of floats or ints
EOD widths in samples or seconds.
eod_x : list of ints or floats
EOD times in samples or seconds.
verbose : int (optional)
Verbosity level.
sdict : dictionary
Dictionary that is used to log data. This is only used if a dictionary
was created by remove_artefacts().
For logging data in noise and wavefish discarding steps, see remove_artefacts().
Returns
-------
mask : numpy array of booleans
Set to True for every unreliable EOD.
sdict : dictionary
Key value pairs of logged data. Data is only logged if a dictionary
was instantiated by remove_artefacts().
"""
mask = np.zeros(clusters.shape, dtype=bool)
for i, cluster in enumerate(np.unique(np.sort(clusters[clusters>=0]))):
if np.max(np.median(eod_widths[clusters==cluster])/np.diff(eod_x[cluster==clusters])) > 0.5:
if verbose>0:
print('deleting unreliable cluster %i, score=%f'%(cluster, np.max(np.median(eod_widths[clusters==cluster])/np.diff(eod_x[cluster==clusters]))))
mask[clusters==cluster] = True
if 'vals_'+str(int(cluster)) in sdict:
sdict['vals_'+str(int(cluster))].append(np.median(eod_widths[clusters==cluster])/np.diff(eod_x[cluster==clusters]))
sdict['mask_'+str(int(cluster))].append(any(mask[clusters==cluster]))
return mask, sdict
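# --- Illustrative sketch (not part of the original algorithm) ---
# A cluster is flagged as unreliable above when the median EOD width becomes
# comparable to the inter-spike intervals (width/ISI > 0.5), which should not
# happen for a single pulsefish. Numeric example with made-up values:
def _example_width_isi_check():
    import numpy as np
    eod_times = np.array([0.0, 0.01, 0.013, 0.03])  # EOD times of one cluster (s)
    median_width = 0.002                            # median EOD width (s)
    score = np.max(median_width/np.diff(eod_times)) # max width-to-ISI ratio
    return score > 0.5                              # 0.67 > 0.5 -> unreliable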
def delete_wavefish_and_sidepeaks(data, clusters, eod_x, eod_widths, interp_f,
w_factor, max_phases=4, verbose=0, sdict={}):
""" Create a mask for EODs that are likely from wavefish, or sidepeaks of bigger EODs.
Parameters
----------
data : list of floats
Raw recording data.
clusters : list of ints
Cluster labels.
eod_x : list of ints
Indices of EOD times.
eod_widths : list of ints
EOD widths in samples.
interp_f : float
Factor used to interpolate original data.
w_factor : float or int
Multiplier for EOD analysis width.
max_phases : int (optional)
Maximum number of phases for any EOD.
If the mean EOD has more phases than this, it is not a pulse EOD.
verbose : int (optional)
Verbosity level.
sdict : dictionary
Dictionary that is used to log data. This is only used if a dictionary
was created by remove_artefacts().
For logging data in noise and wavefish discarding steps, see remove_artefacts().
Returns
-------
mask_wave: numpy array of booleans
Set to True for every EOD which is a wavefish EOD.
mask_sidepeak: numpy array of booleans
Set to True for every snippet which is centered around a sidepeak of an EOD.
sdict : dictionary
Key value pairs of logged data. Data is only logged if a dictionary
was instantiated by remove_artefacts().
"""
mask_wave = np.zeros(clusters.shape, dtype=bool)
mask_sidepeak = np.zeros(clusters.shape, dtype=bool)
for i, cluster in enumerate(np.sort(np.unique(clusters[clusters>=0]))):
if cluster < 0:
continue
cutwidth = np.mean(eod_widths[clusters==cluster])*w_factor
current_x = eod_x[(eod_x>cutwidth) & (eod_x<(len(data)-cutwidth))]
current_clusters = clusters[(eod_x>cutwidth) & (eod_x<(len(data)-cutwidth))]
snippets = np.vstack([data[int(x-cutwidth):int(x+cutwidth)]
for x in current_x[current_clusters==cluster]])
# extract information on main peaks and troughs:
mean_eod = np.mean(snippets, axis=0)
mean_eod = mean_eod - np.mean(mean_eod)
# detect peaks and troughs on data + some maxima/minima at the
# end, so that the sides are also considered for peak detection:
pk, tr = detect_peaks(np.concatenate([[-10*mean_eod[0]], mean_eod, [10*mean_eod[-1]]]),
(np.std(mean_eod)))
pk = pk[(pk>0)&(pk<len(mean_eod))]
tr = tr[(tr>0)&(tr<len(mean_eod))]
if len(pk)>0 and len(tr)>0:
idxs = np.sort(np.concatenate((pk, tr)))
slopes = np.abs(np.diff(mean_eod[idxs]))
m_slope = np.argmax(slopes)
centered = np.min(np.abs(idxs[m_slope:m_slope+2] - int(len(mean_eod)/2)))
# compute all height differences of peaks and troughs within snippets.
# if they are all similar, it is probably noise or a wavefish.
idxs = np.sort(np.concatenate((pk, tr)))
hdiffs = np.diff(mean_eod[idxs])
if centered>interp_f*2:
if verbose>0:
print('Deleting cluster %i, which is a sidepeak'%cluster)
mask_sidepeak[clusters==cluster] = True
w_diff = np.abs(np.diff(np.sort(np.concatenate((pk, tr)))))
if np.abs(np.diff(idxs[m_slope:m_slope+2])) < np.mean(eod_widths[clusters==cluster])*0.5 or len(pk) + len(tr)>max_phases or np.min(w_diff)>2*cutwidth/w_factor: #or len(hdiffs[np.abs(hdiffs)>0.5*(np.max(mean_eod)-np.min(mean_eod))])>max_phases:
if verbose>0:
print('Deleting cluster %i, which is a wavefish'%cluster)
mask_wave[clusters==cluster] = True
if 'vals_'+str(int(cluster)) in sdict:
sdict['vals_'+str(int(cluster))].append([mean_eod, [pk, tr],
idxs[m_slope:m_slope+2]])
sdict['mask_'+str(int(cluster))].append(any(mask_wave[clusters==cluster]))
sdict['mask_'+str(int(cluster))].append(any(mask_sidepeak[clusters==cluster]))
return mask_wave, mask_sidepeak, sdict
def merge_clusters(clusters_1, clusters_2, x_1, x_2, verbose=0):
""" Merge clusters resulting from two clustering methods.
This method only works if clustering is performed on the same EODs
with the same ordering, where there is a one to one mapping from
clusters_1 to clusters_2.
Parameters
----------
clusters_1: list of ints
EOD cluster labels for cluster method 1.
clusters_2: list of ints
EOD cluster labels for cluster method 2.
x_1: list of ints
Indices of EODs for cluster method 1 (clusters_1).
x_2: list of ints
Indices of EODs for cluster method 2 (clusters_2).
verbose : int (optional)
Verbosity level.
Returns
-------
clusters : list of ints
Merged clusters.
x_merged : list of ints
Merged cluster indices.
mask : 2d numpy array of ints (N, 2)
Mask for clusters that are selected from clusters_1 (mask[:,0]) and
from clusters_2 (mask[:,1]).
"""
if verbose > 0:
print('\nMerge cluster:')
# these arrays become 1 for each EOD that is chosen from that array
c1_keep = np.zeros(len(clusters_1))
c2_keep = np.zeros(len(clusters_2))
# add n to one of the cluster lists to avoid overlap
ovl = np.max(clusters_1) + 1
clusters_2[clusters_2!=-1] = clusters_2[clusters_2!=-1] + ovl
remove_clusters = [[]]
keep_clusters = []
og_clusters = [np.copy(clusters_1), np.copy(clusters_2)]
# loop until done
while True:
# compute unique clusters and cluster sizes
# of cluster that have not been iterated over:
c1_labels, c1_size = unique_counts(clusters_1[(clusters_1!=-1) & (c1_keep == 0)])
c2_labels, c2_size = unique_counts(clusters_2[(clusters_2!=-1) & (c2_keep == 0)])
# if all clusters are done, break from loop:
if len(c1_size) == 0 and len(c2_size) == 0:
break
# if the biggest cluster is in c_p, keep this one and discard all clusters
# on the same indices in c_t:
elif np.argmax([np.max(np.append(c1_size, 0)), np.max(np.append(c2_size, 0))]) == 0:
# remove all the mappings from the other indices
cluster_mappings, _ = unique_counts(clusters_2[clusters_1==c1_labels[np.argmax(c1_size)]])
clusters_2[np.isin(clusters_2, cluster_mappings)] = -1
c1_keep[clusters_1==c1_labels[np.argmax(c1_size)]] = 1
remove_clusters.append(cluster_mappings)
keep_clusters.append(c1_labels[np.argmax(c1_size)])
if verbose > 0:
print('Keep cluster %i of group 1, delete clusters %s of group 2'%(c1_labels[np.argmax(c1_size)], str(cluster_mappings[cluster_mappings!=-1] - ovl)))
# if the biggest cluster is in c_t, keep this one and discard all mappings in c_p
elif np.argmax([np.max(np.append(c1_size, 0)), np.max(np.append(c2_size, 0))]) == 1:
# remove all the mappings from the other indices
cluster_mappings, _ = unique_counts(clusters_1[clusters_2==c2_labels[np.argmax(c2_size)]])
clusters_1[np.isin(clusters_1, cluster_mappings)] = -1
c2_keep[clusters_2==c2_labels[np.argmax(c2_size)]] = 1
remove_clusters.append(cluster_mappings)
keep_clusters.append(c2_labels[np.argmax(c2_size)])
if verbose > 0:
print('Keep cluster %i of group 2, delete clusters %s of group 1'%(c2_labels[np.argmax(c2_size)] - ovl, str(cluster_mappings[cluster_mappings!=-1])))
# combine results
clusters = (clusters_1+1)*c1_keep + (clusters_2+1)*c2_keep - 1
x_merged = (x_1)*c1_keep + (x_2)*c2_keep
return clusters, x_merged, np.vstack([c1_keep, c2_keep])
def extract_means(data, eod_x, eod_peak_x, eod_tr_x, eod_widths, clusters, samplerate,
w_factor, verbose=0):
""" Extract mean EODs and EOD timepoints for each EOD cluster.
Parameters
----------
data: list of floats
Raw recording data.
eod_x: list of ints
Locations of EODs in samples.
eod_peak_x : list of ints
Locations of EOD peaks in samples.
eod_tr_x : list of ints
Locations of EOD troughs in samples.
eod_widths: list of ints
EOD widths in samples.
clusters: list of ints
EOD cluster labels
samplerate: float
samplerate of recording
w_factor : float
Multiplication factor for window used to extract EOD.
verbose : int (optional)
Verbosity level.
Returns
-------
mean_eods: list of 2D arrays (3, eod_length)
The average EOD for each detected fish. First column is time in seconds,
second column the mean eod, third column the standard error.
eod_times: list of 1D arrays
For each detected fish the times of EOD in seconds.
eod_peak_times: list of 1D arrays
For each detected fish the times of EOD peaks in seconds.
eod_trough_times: list of 1D arrays
For each detected fish the times of EOD troughs in seconds.
eod_labels: list of ints
Cluster label for each detected fish.
"""
mean_eods, eod_times, eod_peak_times, eod_tr_times, eod_heights, cluster_labels = [], [], [], [], [], []
for cluster in np.unique(clusters):
if cluster!=-1:
cutwidth = np.mean(eod_widths[clusters==cluster])*w_factor
current_x = eod_x[(eod_x>cutwidth) & (eod_x<(len(data)-cutwidth))]
current_clusters = clusters[(eod_x>cutwidth) & (eod_x<(len(data)-cutwidth))]
snippets = np.vstack([data[int(x-cutwidth):int(x+cutwidth)] for x in current_x[current_clusters==cluster]])
mean_eod = np.mean(snippets, axis=0)
eod_time = np.arange(len(mean_eod))/samplerate - cutwidth/samplerate
mean_eod = np.vstack([eod_time, mean_eod, np.std(snippets, axis=0)])
mean_eods.append(mean_eod)
eod_times.append(eod_x[clusters==cluster]/samplerate)
eod_heights.append(np.min(mean_eod)-np.max(mean_eod))
eod_peak_times.append(eod_peak_x[clusters==cluster]/samplerate)
eod_tr_times.append(eod_tr_x[clusters==cluster]/samplerate)
cluster_labels.append(cluster)
return [m for _, m in sorted(zip(eod_heights, mean_eods))], \
       [t for _, t in sorted(zip(eod_heights, eod_times))], \
       [pt for _, pt in sorted(zip(eod_heights, eod_peak_times))], \
       [tt for _, tt in sorted(zip(eod_heights, eod_tr_times))], \
       [c for _, c in sorted(zip(eod_heights, cluster_labels))]
def find_clipped_clusters(clusters, mean_eods, eod_times, eod_peaktimes, eod_troughtimes,
cluster_labels, width_factor, clip_threshold=0.9, verbose=0):
""" Detect EODs that are clipped and set all clusterlabels of these clipped EODs to -1.
Also return the mean EODs and timepoints of these clipped EODs.
Parameters
----------
clusters: array of ints
Cluster labels for each EOD in a recording.
mean_eods: list of numpy arrays
Mean EOD waveform for each cluster.
eod_times: list of numpy arrays
EOD timepoints for each EOD cluster.
eod_peaktimes : list of numpy arrays
EOD peaktimes for each EOD cluster.
eod_troughtimes : list of numpy arrays
EOD troughtimes for each EOD cluster.
cluster_labels: numpy array
Unique EOD clusterlabels.
width_factor : float or int
Factor used for mean EOD extraction.
clip_threshold: float
Threshold for detecting clipped EODs.
verbose: int
Verbosity level.
Returns
-------
clusters : array of ints
Cluster labels for each EOD in the recording, where clipped EODs have been set to -1.
clipped_eods : list of numpy arrays
Mean EOD waveforms for each clipped EOD cluster.
clipped_times : list of numpy arrays
EOD timepoints for each clipped EOD cluster.
clipped_peaktimes : list of numpy arrays
EOD peaktimes for each clipped EOD cluster.
clipped_troughtimes : list of numpy arrays
EOD troughtimes for each clipped EOD cluster.
"""
clipped_eods, clipped_times, clipped_peaktimes, clipped_troughtimes, clipped_labels = [], [], [], [], []
for mean_eod, eod_time, eod_peaktime, eod_troughtime,label in zip(mean_eods, eod_times, eod_peaktimes, eod_troughtimes, cluster_labels):
if (np.count_nonzero(mean_eod[1]>clip_threshold) > len(mean_eod[1])/(width_factor*2)) or \
   (np.count_nonzero(mean_eod[1] < -clip_threshold) > len(mean_eod[1])/(width_factor*2)):
clipped_eods.append(mean_eod)
clipped_times.append(eod_time)
clipped_peaktimes.append(eod_peaktime)
clipped_troughtimes.append(eod_troughtime)
clipped_labels.append(label)
if verbose>0:
print('clipped pulsefish')
clusters[np.isin(clusters, clipped_labels)] = -1
return clusters, clipped_eods, clipped_times, clipped_peaktimes, clipped_troughtimes
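# --- Illustrative sketch (not part of the original algorithm) ---
# The clipping test above counts how many samples of the mean EOD exceed the
# clip threshold (close to the ADC limits of +-1) relative to the window size.
# Minimal numpy-only version with a made-up waveform:
def _example_clipping_test(clip_threshold=0.9, width_factor=3):
    import numpy as np
    mean_eod = np.concatenate((np.zeros(20), np.full(10, 0.95), np.zeros(20)))
    n_clipped = np.count_nonzero(mean_eod > clip_threshold)
    return n_clipped > len(mean_eod)/(width_factor*2)  # 10 > 8.3 -> clipped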
def delete_moving_fish(clusters, eod_t, T, eod_heights, eod_widths, samplerate,
min_dt=0.25, stepsize=0.05, sliding_window_factor=2000,
verbose=0, plot_level=0, save_plot=False, save_path='',
ftype='pdf', return_data=[]):
"""
Use a sliding window to detect the minimum number of fish detected simultaneously,
then delete all other EOD clusters.
Do this only for EODs within the same width clusters, as a
moving fish will preserve its EOD width.
Parameters
----------
clusters: list of ints
EOD cluster labels.
eod_t: list of floats
Timepoints of the EODs (in seconds).
T: float
Length of recording (in seconds).
eod_heights: list of floats
EOD amplitudes.
eod_widths: list of floats
EOD widths (in seconds).
samplerate: float
Recording data samplerate.
min_dt : float (optional)
Minimum sliding window size (in seconds).
stepsize : float (optional)
Sliding window stepsize (in seconds).
sliding_window_factor : int or float
Multiplier for sliding window width,
where the sliding window width = median(EOD_width)*sliding_window_factor.
verbose : int (optional)
Verbosity level.
plot_level : int (optional)
Similar to verbosity levels, but with plots.
Only set to > 0 for debugging purposes.
save_plot : bool (optional)
Set to True to save the plots created by plot_level.
save_path : string (optional)
Path to save data to. Only important if you wish to save data (save_data==True).
ftype : string (optional)
Define the filetype to save the plots in if save_plots is set to True.
Options are: 'png', 'jpg', 'svg' ...
return_data : list of strings (optional)
Keys that specify data to be logged. The key that can be used to log data
in this function is 'moving_fish' (see extract_pulsefish()).
Returns
-------
clusters : list of ints
Cluster labels, where deleted clusters have been set to -1.
window : list of 2 floats
Start and end of window selected for deleting moving fish in seconds.
mf_dict : dictionary
Key value pairs of logged data. Data to be logged is specified by return_data.
"""
mf_dict = {}
if len(np.unique(clusters[clusters!=-1])) == 0:
return clusters, [0, 1], {}
all_keep_clusters = []
width_classes = merge_gaussians(eod_widths, np.copy(clusters), 0.75)
all_windows = []
all_dts = []
ev_num = 0
wc_num = len(np.unique(width_classes[clusters>=0]))
for iw, w in enumerate(np.unique(width_classes[clusters>=0])):
# initialize variables
min_clusters = 100
average_height = 0
sparse_clusters = 100
keep_clusters = []
dt = max(min_dt, np.median(eod_widths[width_classes==w])*sliding_window_factor)
window_start = 0
window_end = dt
wclusters = clusters[width_classes==w]
weod_t = eod_t[width_classes==w]
weod_heights = eod_heights[width_classes==w]
weod_widths = eod_widths[width_classes==w]
all_dts.append(dt)
if verbose>0:
print('sliding window dt = %f'%dt)
# TODO: make the sliding window length dependent on the EOD width?
ignore_steps = np.zeros(len(np.arange(0, T-dt+stepsize, stepsize)))
for i, t in enumerate(np.arange(0, T-dt+stepsize, stepsize)):
current_clusters = wclusters[(weod_t>=t)&(weod_t<t+dt)&(wclusters!=-1)]
if len(np.unique(current_clusters))==0:
ignore_steps[i-int(dt/stepsize):i+int(dt/stepsize)] = 1
if verbose>0:
print('No pulsefish in recording at T=%.2f:%.2f'%(t, t+dt))
x = np.arange(0, T-dt+stepsize, stepsize)
y = np.ones(len(x))
running_sum = np.ones(len(np.arange(0, T+stepsize, stepsize)))
ulabs = np.unique(wclusters[wclusters>=0])
# sliding window
for j, (t, ignore_step) in enumerate(zip(x, ignore_steps)):
current_clusters = wclusters[(weod_t>=t)&(weod_t<t+dt)&(wclusters!=-1)]
current_widths = weod_widths[(weod_t>=t)&(weod_t<t+dt)&(wclusters!=-1)]
y[j] = len(np.unique(current_clusters))
if (len(np.unique(current_clusters)) <= min_clusters) and \
(ignore_step==0) and \
(len(np.unique(current_clusters)) > 0):
current_labels = np.isin(wclusters, np.unique(current_clusters))
current_height = np.mean(weod_heights[current_labels])
# compute the number of clusters that are too sparse
clusters_after_deletion = \
    np.unique(remove_sparse_detections(np.copy(clusters[np.isin(clusters, np.unique(current_clusters))]),
                                       samplerate*eod_widths[np.isin(clusters, np.unique(current_clusters))],
                                       samplerate, T))
current_sparse_clusters = len(np.unique(current_clusters)) - \
    len(clusters_after_deletion[clusters_after_deletion!=-1])
if current_sparse_clusters <= sparse_clusters and \
((current_sparse_clusters<sparse_clusters) or
(current_height > average_height) or
(len(np.unique(current_clusters)) < min_clusters)):
keep_clusters = np.unique(current_clusters)
min_clusters = len(np.unique(current_clusters))
average_height = current_height
window_end = t+dt
sparse_clusters = current_sparse_clusters
all_keep_clusters.append(keep_clusters)
all_windows.append(window_end)
if 'moving_fish' in return_data or plot_level>0:
if 'w' in mf_dict:
mf_dict['w'].append(np.median(eod_widths[width_classes==w]))
mf_dict['T'].append(T)
mf_dict['dt'].append(dt)
mf_dict['clusters'].append(wclusters)
mf_dict['t'].append(weod_t)
mf_dict['fishcount'].append([x+0.5*(x[1]-x[0]), y])
mf_dict['ignore_steps'].append(ignore_steps)
else:
mf_dict['w'] = [np.median(eod_widths[width_classes==w])]
mf_dict['T'] = [T]
mf_dict['dt'] = [dt]
mf_dict['clusters'] = [wclusters]
mf_dict['t'] = [weod_t]
mf_dict['fishcount'] = [[x+0.5*(x[1]-x[0]), y]]
mf_dict['ignore_steps'] = [ignore_steps]
if verbose>0:
print('Estimated number of pulsefish in recording: %i'%len(all_keep_clusters))
if plot_level>0:
plot_moving_fish(mf_dict['w'], mf_dict['dt'], mf_dict['clusters'],mf_dict['t'],
mf_dict['fishcount'], T, mf_dict['ignore_steps'])
if save_plot:
plt.savefig('%sdelete_moving_fish.%s'%(save_path, ftype))
# empty dict
if 'moving_fish' not in return_data:
mf_dict = {}
# delete all clusters that are not selected
clusters[np.invert(np.isin(clusters, np.concatenate(all_keep_clusters)))] = -1
return clusters, [np.max(all_windows)-np.max(all_dts), np.max(all_windows)], mf_dict
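# --- Illustrative sketch (not part of the original algorithm) ---
# The core idea of delete_moving_fish(): slide a window over the EOD times,
# count the distinct clusters in each window, and keep the clusters of the
# window with the fewest (but at least one) fish. A heavily simplified,
# numpy-only version with made-up events:
def _example_min_fish_window(dt=1.0, stepsize=0.5, T=5.0):
    import numpy as np
    eod_t = np.array([0.2, 0.7, 1.1, 1.6, 2.3, 4.1, 4.6])  # EOD times (s)
    clusters = np.array([0, 1, 0, 1, 0, 0, 0])             # cluster labels
    best_count, keep = np.inf, np.array([])
    for t0 in np.arange(0, T - dt + stepsize, stepsize):
        labels = np.unique(clusters[(eod_t >= t0) & (eod_t < t0 + dt)])
        if 0 < len(labels) < best_count:
            best_count, keep = len(labels), labels
    return keep                                            # -> array([0])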
def remove_sparse_detections(clusters, eod_widths, samplerate, T,
min_density=0.0005, verbose=0):
""" Remove all EOD clusters that are too sparse
Parameters
----------
clusters : list of ints
Cluster labels.
eod_widths : list of ints
Cluster widths in samples.
samplerate : int or float
Samplerate.
T : int or float
Length of the recording in seconds.
min_density : float (optional)
Minimum density for realistic EOD detections.
verbose : int (optional)
Verbosity level.
Returns
-------
clusters : list of ints
Cluster labels, where sparse clusters have been set to -1.
"""
for c in np.unique(clusters):
if c!=-1:
n = len(clusters[clusters==c])
w = np.median(eod_widths[clusters==c])/samplerate
if n*w < T*min_density:
if verbose>0:
print('cluster %i is too sparse'%c)
clusters[clusters==c] = -1
return clusters
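# --- Illustrative sketch (not part of the original algorithm) ---
# The sparsity criterion above: a cluster whose summed EOD duration (number of
# EODs times the median width) covers less than min_density of the recording
# is discarded. Numeric example with made-up values:
def _example_sparsity(min_density=0.0005):
    n_eods = 8              # EODs detected for one cluster
    median_width = 0.0002   # median EOD width in seconds
    T = 60.0                # recording length in seconds
    return n_eods*median_width < T*min_density  # 0.0016 < 0.03 -> too sparse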
| gpl-3.0 |
pysb/pysb | pysb/simulator/cupsoda.py | 5 | 27740 | from pysb.simulator.base import Simulator, SimulatorException, SimulationResult
import pysb
import pysb.bng
import numpy as np
from scipy.constants import N_A
import os
import re
import subprocess
import tempfile
import time
import logging
from pysb.logging import EXTENDED_DEBUG
import shutil
from pysb.pathfinder import get_path
import sympy
import collections
from collections.abc import Iterable
try:
import pandas as pd
except ImportError:
pd = None
try:
import pycuda.driver as cuda
except ImportError:
cuda = None
class CupSodaSimulator(Simulator):
"""An interface for running cupSODA, a CUDA implementation of LSODA.
cupSODA is a graphics processing unit (GPU)-based implementation of the
LSODA simulation algorithm (see references). It requires an NVIDIA GPU
card with support for the CUDA framework version 7 or above. Further
details of cupSODA and software can be found on github:
https://github.com/aresio/cupSODA
The simplest way to install cupSODA is to use a pre-compiled version,
which can be downloaded from here:
https://github.com/aresio/cupSODA/releases
Parameters
----------
model : pysb.Model
Model to integrate.
tspan : vector-like, optional
Time values at which the integrations are sampled. The first and last
values define the time range.
initials : list-like, optional
Initial species concentrations for all simulations. Dimensions are
N_SIMS x number of species.
param_values : list-like, optional
Parameters for all simulations. Dimensions are N_SIMS x number of
parameters.
verbose : bool or int, optional
Verbosity level, see :class:`pysb.simulator.base.Simulator` for
further details.
**kwargs: dict, optional
Extra keyword arguments, including:
* ``gpu``: Index of GPU to run on (default: 0)
* ``vol``: System volume; required if model encoded in extrinsic
(number) units (default: None)
* ``obs_species_only``: Only output species contained in observables
(default: True)
* ``cleanup``: Delete all temporary files after the simulation is
finished. Includes both BioNetGen and cupSODA files. Useful for
debugging (default: True)
* ``prefix``: Prefix for the temporary directory containing cupSODA
input and output files (default: model name)
* ``base_dir``: Directory in which temporary directory with cupSODA
input and output files are placed (default: system directory
determined by `tempfile.mkdtemp`)
* ``integrator``: Name of the integrator to use; see
`default_integrator_options` (default: 'cupsoda')
* ``integrator_options``: A dictionary of keyword arguments to
supply to the integrator; see `default_integrator_options`.
Attributes
----------
model : pysb.Model
Model passed to the constructor.
tspan : numpy.ndarray
Time values passed to the constructor.
initials : numpy.ndarray
Initial species concentrations for all simulations. Dimensions are
number of simulations x number of species.
param_values : numpy.ndarray
Parameters for all simulations. Dimensions are number of simulations
x number of parameters.
    verbose : bool or int
Verbosity setting. See the base class
:class:`pysb.simulator.base.Simulator` for further details.
gpu : int or list
Index of GPU being run on, or a list of integers to use multiple GPUs.
        Simulations will be split equally among the GPUs.
outdir : str
Directory where cupSODA output files are placed. Input files are
also placed here.
opts: dict
Dictionary of options for the integrator, which can include the
following:
* vol (float or None): System volume
* n_blocks (int or None): Number of GPU blocks used by the simulator
* atol (float): Absolute integrator tolerance
* rtol (float): Relative integrator tolerance
* chunksize (int or None): The maximum number of simulations to run
per GPU at one time. Set this option if your GPU is running out of
memory.
* memory_usage ('global', 'shared', or 'sharedconstant'): The type of
GPU memory to use
* max_steps (int): The maximum number of internal integrator iterations
(equivalent to LSODA's mxstep)
integrator : str
Name of the integrator in use (only "cupsoda" is supported).
Notes
-----
1. If `vol` is defined, species amounts and rate constants are assumed
to be in number units and are automatically converted to concentration
units before generating the cupSODA input files. The species
concentrations returned by cupSODA are converted back to number units
during loading.
2. If `obs_species_only` is True, only the species contained within
observables are output by cupSODA. All other concentrations are set
to 'nan'.
References
----------
1. Harris, L.A., Nobile, M.S., Pino, J.C., Lubbock, A.L.R., Besozzi, D.,
Mauri, G., Cazzaniga, P., and Lopez, C.F. 2017. GPU-powered model
analysis with PySB/cupSODA. Bioinformatics 33, pp.3492-3494.
2. Nobile M.S., Cazzaniga P., Besozzi D., Mauri G., 2014. GPU-accelerated
simulations of mass-action kinetics models with cupSODA, Journal of
Supercomputing, 69(1), pp.17-24.
3. Petzold, L., 1983. Automatic selection of methods for solving stiff and
nonstiff systems of ordinary differential equations. SIAM journal on
scientific and statistical computing, 4(1), pp.136-148.
"""
_supports = {'multi_initials': True, 'multi_param_values': True}
_memory_options = {'global': '0', 'shared': '1', 'sharedconstant': '2'}
default_integrator_options = {
# some sane default options for a few well-known integrators
'cupsoda': {
'max_steps': 20000, # max # of internal iterations (LSODA's MXSTEP)
'atol': 1e-8, # absolute tolerance
'rtol': 1e-8, # relative tolerance
'chunksize': None, # Max number of simulations per GPU per run
'n_blocks': None, # number of GPU blocks
'memory_usage': 'sharedconstant'}} # see _memory_options dict
_integrator_options_allowed = {'max_steps', 'atol', 'rtol', 'n_blocks',
'memory_usage', 'vol', 'chunksize'}
def __init__(self, model, tspan=None, initials=None, param_values=None,
verbose=False, **kwargs):
super(CupSodaSimulator, self).__init__(model, tspan=tspan,
initials=initials,
param_values=param_values,
verbose=verbose, **kwargs)
self.gpu = kwargs.pop('gpu', (0, ))
if not isinstance(self.gpu, Iterable):
self.gpu = [self.gpu]
self._obs_species_only = kwargs.pop('obs_species_only', True)
self._cleanup = kwargs.pop('cleanup', True)
self._prefix = kwargs.pop('prefix', self._model.name)
# Sanitize the directory - cupsoda doesn't handle spaces etc. well
self._prefix = re.sub('[^0-9a-zA-Z]', '_', self._prefix)
self._base_dir = kwargs.pop('base_dir', None)
self.integrator = kwargs.pop('integrator', 'cupsoda')
integrator_options = kwargs.pop('integrator_options', {})
if kwargs:
raise ValueError('Unknown keyword argument(s): {}'.format(
', '.join(kwargs.keys())
))
unknown_integrator_options = set(integrator_options.keys()).difference(
self._integrator_options_allowed
)
if unknown_integrator_options:
raise ValueError(
'Unknown integrator_options: {}. Allowed options: {}'.format(
', '.join(unknown_integrator_options),
', '.join(self._integrator_options_allowed)
)
)
# generate the equations for the model
pysb.bng.generate_equations(self._model, self._cleanup, self.verbose)
# build integrator options list from our defaults and any kwargs
# passed to this function
options = {}
if self.default_integrator_options.get(self.integrator):
options.update(self.default_integrator_options[
self.integrator]) # default options
else:
raise SimulatorException(
"Integrator type '" + self.integrator + "' not recognized.")
options.update(integrator_options) # overwrite
# defaults
self.opts = options
self._out_species = None
# private variables (to reduce the number of function calls)
self._len_rxns = len(self._model.reactions)
self._len_species = len(self._model.species)
self._len_params = len(self._model.parameters)
self._model_parameters_rules = self._model.parameters_rules()
# Set cupsoda verbosity level
logger_level = self._logger.logger.getEffectiveLevel()
if logger_level <= EXTENDED_DEBUG:
self._cupsoda_verbose = 2
elif logger_level <= logging.DEBUG:
self._cupsoda_verbose = 1
else:
self._cupsoda_verbose = 0
# regex for extracting cupSODA reported running time
self._running_time_regex = re.compile(r'Running time:\s+(\d+\.\d+)')
def _run_chunk(self, gpus, outdir, chunk_idx, cmtx, sims, trajectories,
tout):
_indirs = {}
_outdirs = {}
p = {}
# Path to cupSODA executable
bin_path = get_path('cupsoda')
# Start simulations
for gpu in gpus:
_indirs[gpu] = os.path.join(outdir, "INPUT_GPU{}_{}".format(
gpu, chunk_idx))
os.mkdir(_indirs[gpu])
_outdirs[gpu] = os.path.join(outdir, "OUTPUT_GPU{}_{}".format(
gpu, chunk_idx))
# Create cupSODA input files
self._create_input_files(_indirs[gpu], sims[gpu], cmtx)
# Build command
# ./cupSODA input_model_folder blocks output_folder simulation_
# file_prefix gpu_number fitness_calculation memory_use dump
command = [bin_path, _indirs[gpu], str(self.n_blocks),
_outdirs[gpu], self._prefix, str(gpu),
'0', self._memory_usage, str(self._cupsoda_verbose)]
self._logger.info("Running cupSODA: " + ' '.join(command))
# Run simulation and return trajectories
p[gpu] = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Read results
for gpu in gpus:
(p_out, p_err) = p[gpu].communicate()
p_out = p_out.decode('utf-8')
p_err = p_err.decode('utf-8')
logger_level = self._logger.logger.getEffectiveLevel()
if logger_level <= logging.INFO:
run_time_match = self._running_time_regex.search(p_out)
if run_time_match:
self._logger.info('cupSODA GPU {} chunk {} reported '
'time: {} seconds'.format(
gpu,
chunk_idx,
run_time_match.group(1)))
self._logger.debug('cupSODA GPU {} chunk {} stdout:\n{}'.format(
gpu, chunk_idx, p_out))
if p_err:
self._logger.error('cupSODA GPU {} chunk {} '
'stderr:\n{}'.format(
gpu, chunk_idx, p_err))
if p[gpu].returncode:
raise SimulatorException(
"cupSODA GPU {} chunk {} exception:\n{}\n{}".format(
gpu, chunk_idx, p_out.rstrip("at line"), p_err.rstrip()
)
)
tout_run, trajectories_run = self._load_trajectories(
_outdirs[gpu], sims[gpu])
if trajectories is None:
tout = tout_run
trajectories = trajectories_run
else:
tout = np.concatenate((tout, tout_run))
trajectories = np.concatenate(
(trajectories, trajectories_run))
return tout, trajectories
def run(self, tspan=None, initials=None, param_values=None):
"""Perform a set of integrations.
Returns a :class:`.SimulationResult` object.
Parameters
----------
tspan : list-like, optional
Time values at which the integrations are sampled. The first and
last values define the time range.
initials : list-like, optional
Initial species concentrations for all simulations. Dimensions are
            number of simulations x number of species.
param_values : list-like, optional
Parameters for all simulations. Dimensions are number of
simulations x number of parameters.
Returns
-------
A :class:`SimulationResult` object
Notes
-----
1. An exception is thrown if `tspan` is not defined in either
           `__init__` or `run`.
        2. If neither `initials` nor `param_values` is defined in either
           `__init__` or `run`, a single simulation is run with the initial
concentrations and parameter values defined in the model.
"""
super(CupSodaSimulator, self).run(tspan=tspan, initials=initials,
param_values=param_values,
_run_kwargs=[])
# Create directories for cupSODA input and output files
_outdirs = {}
_indirs = {}
start_time = time.time()
cmtx = self._get_cmatrix()
outdir = tempfile.mkdtemp(prefix=self._prefix + '_',
dir=self._base_dir)
self._logger.debug("Output directory is %s" % outdir)
# Set up chunking (enforce max # sims per GPU per run)
n_sims = len(self.param_values)
chunksize_gpu = self.opts.get('chunksize', None)
if chunksize_gpu is None:
chunksize_gpu = n_sims
chunksize_total = chunksize_gpu * len(self.gpu)
tout = None
trajectories = None
chunks = np.array_split(range(n_sims),
np.ceil(n_sims / chunksize_total))
try:
for chunk_idx, chunk in enumerate(chunks):
self._logger.debug('cupSODA chunk {} of {}'.format(
(chunk_idx + 1), len(chunks)))
# Split chunk equally between GPUs
sims = dict(zip(self.gpu, np.array_split(chunk,
len(self.gpu))))
tout, trajectories = self._run_chunk(
self.gpu, outdir, chunk_idx, cmtx, sims,
trajectories, tout)
finally:
if self._cleanup:
shutil.rmtree(outdir)
end_time = time.time()
self._logger.info("cupSODA + I/O time: {} seconds".format(
end_time - start_time))
return SimulationResult(self, tout, trajectories)
@property
def _memory_usage(self):
try:
return self._memory_options[self.opts['memory_usage']]
except KeyError:
            raise Exception('memory_usage must be one of %s'
                            % list(self._memory_options.keys()))
@property
def vol(self):
vol = self.opts.get('vol', None)
return vol
@vol.setter
def vol(self, volume):
self.opts['vol'] = volume
@property
def n_blocks(self):
n_blocks = self.opts.get('n_blocks')
if n_blocks is None:
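            # Heuristic: start from 32 threads per block, capped by how many
            # per-thread state vectors ((n_species + 1) 4-byte floats) fit in
            # the device's shared memory per block when pycuda can query the
            # GPU; otherwise fall back to the default of 32.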
default_threads_per_block = 32
bytes_per_float = 4
memory_per_thread = (self._len_species + 1) * bytes_per_float
if cuda is None:
threads_per_block = default_threads_per_block
else:
cuda.init()
device = cuda.Device(self.gpu[0])
attrs = device.get_attributes()
shared_memory_per_block = attrs[
cuda.device_attribute.MAX_SHARED_MEMORY_PER_BLOCK]
upper_limit_threads_per_block = attrs[
cuda.device_attribute.MAX_THREADS_PER_BLOCK]
max_threads_per_block = min(
shared_memory_per_block / memory_per_thread,
upper_limit_threads_per_block)
threads_per_block = min(max_threads_per_block,
default_threads_per_block)
n_blocks = int(
np.ceil(1. * len(self.param_values) / threads_per_block))
self._logger.debug('n_blocks set to {} (used pycuda: {})'.format(
n_blocks, cuda is not None
))
self.n_blocks = n_blocks
return n_blocks
@n_blocks.setter
def n_blocks(self, n_blocks):
if not isinstance(n_blocks, int):
raise ValueError("n_blocks must be an integer")
if n_blocks <= 0:
raise ValueError("n_blocks must be greater than 0")
self.opts['n_blocks'] = n_blocks
def _create_input_files(self, directory, sims, cmtx):
# atol_vector
with open(os.path.join(directory, "atol_vector"), 'w') as atol_vector:
for i in range(self._len_species):
atol_vector.write(str(self.opts.get('atol')))
if i < self._len_species - 1:
atol_vector.write("\n")
# c_matrix
with open(os.path.join(directory, "c_matrix"), 'w') as c_matrix:
for i in sims:
line = ""
for j in range(self._len_rxns):
if j > 0:
line += "\t"
line += str(cmtx[i][j])
c_matrix.write(line)
if i != sims[-1]:
c_matrix.write("\n")
# cs_vector
with open(os.path.join(directory, "cs_vector"), 'w') as cs_vector:
self._out_species = range(self._len_species) # species to output
if self._obs_species_only:
self._out_species = [False for sp in self._model.species]
for obs in self._model.observables:
for i in obs.species:
self._out_species[i] = True
self._out_species = [i for i in range(self._len_species) if
self._out_species[i] is True]
for i in range(len(self._out_species)):
if i > 0:
cs_vector.write("\n")
cs_vector.write(str(self._out_species[i]))
# left_side
with open(os.path.join(directory, "left_side"), 'w') as left_side:
for i in range(self._len_rxns):
line = ""
for j in range(self._len_species):
if j > 0:
line += "\t"
stoich = 0
for k in self._model.reactions[i]['reactants']:
if j == k:
stoich += 1
line += str(stoich)
if i < self._len_rxns - 1:
left_side.write(line + "\n")
else:
left_side.write(line)
# max_steps
with open(os.path.join(directory, "max_steps"), 'w') as mxsteps:
mxsteps.write(str(self.opts['max_steps']))
# model_kind
with open(os.path.join(directory, "modelkind"), 'w') as model_kind:
# always set modelkind to 'deterministic'
model_kind.write("deterministic")
# MX_0
with open(os.path.join(directory, "MX_0"), 'w') as MX_0:
mx0 = self.initials
# if a volume has been defined, rescale populations
# by N_A*vol to get concentration
# (NOTE: act on a copy of self.initials, not
# the original, which we don't want to modify)
if self.vol:
mx0 = mx0.copy()
mx0 /= (N_A * self.vol)
for i in sims:
line = ""
for j in range(self._len_species):
if j > 0:
line += "\t"
line += str(mx0[i][j])
MX_0.write(line)
if i != sims[-1]:
MX_0.write("\n")
# right_side
with open(os.path.join(directory, "right_side"), 'w') as right_side:
for i in range(self._len_rxns):
line = ""
for j in range(self._len_species):
if j > 0:
line += "\t"
                    stoich = 0
                    for k in self._model.reactions[i]['products']:
                        if j == k:
                            stoich += 1
                    line += str(stoich)
if i < self._len_rxns - 1:
right_side.write(line + "\n")
else:
right_side.write(line)
# rtol
with open(os.path.join(directory, "rtol"), 'w') as rtol:
rtol.write(str(self.opts.get('rtol')))
# t_vector
with open(os.path.join(directory, "t_vector"), 'w') as t_vector:
for t in self.tspan:
t_vector.write(str(float(t)) + "\n")
# time_max
with open(os.path.join(directory, "time_max"), 'w') as time_max:
time_max.write(str(float(self.tspan[-1])))
def _get_cmatrix(self):
if self.model.tags:
raise ValueError('cupSODA does not currently support local '
'functions')
self._logger.debug("Constructing the c_matrix:")
c_matrix = np.zeros((len(self.param_values), self._len_rxns))
par_names = [p.name for p in self._model_parameters_rules]
rate_mask = np.array([p in self._model_parameters_rules for p in
self._model.parameters])
rate_args = []
par_vals = self.param_values[:, rate_mask]
rate_order = []
for rxn in self._model.reactions:
rate_args.append([arg for arg in rxn['rate'].atoms(sympy.Symbol) if
not arg.name.startswith('__s')])
reactants = len(rxn['reactants'])
rate_order.append(reactants)
output = 0.01 * len(par_vals)
output = int(output) if output > 1 else 1
for i in range(len(par_vals)):
if i % output == 0:
self._logger.debug(str(int(round(100. * i / len(par_vals)))) +
"%")
for j in range(self._len_rxns):
rate = 1.0
for r in rate_args[j]:
if isinstance(r, pysb.Parameter):
rate *= par_vals[i][par_names.index(r.name)]
elif isinstance(r, pysb.Expression):
raise ValueError('cupSODA does not currently support '
'models with Expressions')
else:
rate *= r
# volume correction
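                # (rate constants supplied in number units are rescaled by
                # (N_A*vol)**(reaction order - 1) to concentration units;
                # e.g. a bimolecular constant is multiplied by N_A*vol)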
if self.vol:
rate *= (N_A * self.vol) ** (rate_order[j] - 1)
c_matrix[i][j] = rate
self._logger.debug("100%")
return c_matrix
def _load_trajectories(self, directory, sims):
"""Read simulation results from output files.
Returns `tout` and `trajectories` arrays.
"""
files = [filename for filename in os.listdir(directory) if
re.match(self._prefix, filename)]
if len(files) == 0:
raise SimulatorException(
"Cannot find any output files to load data from.")
if len(files) != len(sims):
raise SimulatorException(
"Number of output files (%d) does not match number "
"of requested simulations (%d)." % (
len(files), len(sims)))
n_sims = len(files)
trajectories = [None] * n_sims
tout = [None] * n_sims
traj_n = np.ones((len(self.tspan), self._len_species)) * float('nan')
tout_n = np.ones(len(self.tspan)) * float('nan')
# load the data
indir_prefix = os.path.join(directory, self._prefix)
for idx, n in enumerate(sims):
trajectories[idx] = traj_n.copy()
tout[idx] = tout_n.copy()
filename = indir_prefix + "_" + str(idx)
if not os.path.isfile(filename):
raise Exception("Cannot find input file " + filename)
# determine optimal loading method
if idx == 0:
(data, use_pandas) = self._test_pandas(filename)
# load data
else:
if use_pandas:
data = self._load_with_pandas(filename)
else:
data = self._load_with_openfile(filename)
# store data
tout[idx] = data[:, 0]
trajectories[idx][:, self._out_species] = data[:, 1:]
# volume correction
if self.vol:
trajectories[idx][:, self._out_species] *= (N_A * self.vol)
return np.array(tout), np.array(trajectories)
def _test_pandas(self, filename):
""" calculates the fastest method to load in data
Parameters
----------
filename : str
filename to laod in
Returns
-------
np.array, bool
"""
# using open(filename,...)
start = time.time()
data = self._load_with_openfile(filename)
end = time.time()
load_time_openfile = end - start
# using pandas
if pd:
start = time.time()
self._load_with_pandas(filename)
end = time.time()
load_time_pandas = end - start
if load_time_pandas < load_time_openfile:
return data, True
return data, False
@staticmethod
def _load_with_pandas(filename):
data = pd.read_csv(filename, sep='\t', skiprows=None,
header=None).to_numpy()
return data
@staticmethod
def _load_with_openfile(filename):
with open(filename, 'r') as f:
data = [line.rstrip('\n').split() for line in f]
            data = np.array(data, dtype=float, copy=False)
return data
def run_cupsoda(model, tspan, initials=None, param_values=None,
integrator='cupsoda', cleanup=True, verbose=False, **kwargs):
"""Wrapper method for running cupSODA simulations.
Parameters
----------
See ``CupSodaSimulator`` constructor.
Returns
-------
SimulationResult.all : list of record arrays
List of trajectory sets. The first dimension contains species,
observables and expressions (in that order)
"""
sim = CupSodaSimulator(model, tspan=tspan, integrator=integrator,
cleanup=cleanup, verbose=verbose, **kwargs)
simres = sim.run(initials=initials, param_values=param_values)
return simres.all
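# A minimal usage sketch (illustrative assumptions): the example model import,
# time span and parameter sweep below are only placeholders, and actually
# running this requires a CUDA-capable GPU with the cupSODA binary installed
# and discoverable on the PySB path.
if __name__ == '__main__':
    import numpy as np
    from pysb.examples.robertson import model  # example model shipped with PySB
    tspan = np.linspace(0, 100, 101)
    # sweep the first rate constant across 10 simulations
    param_values = np.repeat([[p.value for p in model.parameters]], 10, axis=0)
    param_values[:, 0] *= np.linspace(0.5, 2.0, 10)
    sim = CupSodaSimulator(model, tspan=tspan, gpu=0,
                           integrator_options={'atol': 1e-8, 'rtol': 1e-8})
    res = sim.run(param_values=param_values)
    print(res.observables[0])                  # record array for the first run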
| bsd-2-clause |